hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aae1dd014067580d0af7ac2900087b8005afa226 | 9,713 | py | Python | self_test/predict_emulator_install_user.py | appleface2050/Coursera-ML | e588fa5776a79d6516b2135124898a2db9da82ae | [
"MIT"
] | null | null | null | self_test/predict_emulator_install_user.py | appleface2050/Coursera-ML | e588fa5776a79d6516b2135124898a2db9da82ae | [
"MIT"
] | null | null | null | self_test/predict_emulator_install_user.py | appleface2050/Coursera-ML | e588fa5776a79d6516b2135124898a2db9da82ae | [
"MIT"
] | null | null | null | import datetime
import seaborn as sns
import numpy as np
import pandas as pd
# import MySQLdb as SQL
import pymysql.cursors
import matplotlib.pyplot as plt
from helper import linear_regression as lr
from helper import general as general
con = {"host": '60.205.94.60',
"port": 33009,
"user": 'bluestackscn',
"password": 'Bluestacks2016',
"db": 'bs_datastats',
"charset": 'utf8',
"cursorclass": pymysql.cursors.DictCursor}
con_monitor = {"host": '60.205.94.60',
"port": 33006,
"user": 'bluestackscn',
"password": 'Bluestacks2016',
"db": 'bst-monitor',
"charset": 'utf8',
"cursorclass": pymysql.cursors.DictCursor}
def get_emulator_install_data(start):
"""
    Get the number of users who successfully installed the emulator on a given day.
"""
connection = pymysql.connect(**con)
data = []
try:
with connection.cursor() as cursor:
            # Execute the SQL query
sql = """
SELECT result_date, install_success_user FROM stats_emulator
WHERE result_date ="%s"
AND scope_id = 1
""" % (start)
# print (sql)
cursor.execute(sql)
result = cursor.fetchall()
for i in result:
data.append(i["install_success_user"])
            # Autocommit is off by default; an explicit commit() is needed to persist writes
# connection.commit()
finally:
connection.close()
if not data:
return 0
else:
return data[0]
def get_whole_day_result_by_date_range(dt_start, dt_end):
connection = pymysql.connect(**con)
data = []
try:
with connection.cursor() as cursor:
            # Execute the SQL query
sql = """
SELECT result_date, install_success_user FROM stats_emulator
WHERE result_date >="%s" and result_date <"%s"
AND scope_id = 1
""" % (dt_start, dt_end)
# print (sql)
cursor.execute(sql)
result = cursor.fetchall()
for i in result:
data.append(i["install_success_user"])
            # Autocommit is off by default; an explicit commit() is needed to persist writes
# connection.commit()
finally:
connection.close()
return data
def get_monitor_data_for_one_day_with_hour_sql(start, hour_sql):
data = [1]
connection_monitor = pymysql.connect(**con_monitor)
try:
with connection_monitor.cursor() as cursor:
            # Execute the SQL query
sql = """
SELECT HOUR, install_success_user FROM monitor_odps_emulatormonitorstats
WHERE result_date="%s" AND HOUR in (%s)
ORDER BY HOUR asc
""" % (start, hour_sql)
# print (sql)
cursor.execute(sql)
result = cursor.fetchall()
for i in result:
data.append(i["install_success_user"])
finally:
connection_monitor.close()
return data
def get_monitor_data_for_one_day(dt_start):
data = [1]
connection_monitor = pymysql.connect(**con_monitor)
try:
with connection_monitor.cursor() as cursor:
            # Execute the SQL query
sql = """
SELECT HOUR, install_success_user FROM monitor_odps_emulatormonitorstats
WHERE result_date="%s" AND HOUR in ("00", "01", "02", "03", "04", "05", "06", "07", "08")
ORDER BY HOUR asc
""" % (dt_start)
# print (sql)
cursor.execute(sql)
result = cursor.fetchall()
for i in result:
data.append(i["install_success_user"])
            # Autocommit is off by default; an explicit commit() is needed to persist writes
# connection_monitor.commit()
finally:
# pass
connection_monitor.close()
return data
def get_monitor_data_by_date_range_and_hour(hour, dt_start, dt_end):
connection_monitor = pymysql.connect(**con_monitor)
data = []
try:
with connection_monitor.cursor() as cursor:
            # Execute the SQL query
sql = """
SELECT result_date, HOUR, install_success_user FROM monitor_odps_emulatormonitorstats
WHERE result_date >="%s" AND result_date< "%s" AND HOUR = "%s"
ORDER BY result_date asc
""" % (dt_start, dt_end, hour)
# print (sql)
cursor.execute(sql)
result = cursor.fetchall()
for i in result:
data.append(i["install_success_user"])
            # # Autocommit is off by default; an explicit commit() is needed to persist writes
# connection.commit()
finally:
connection_monitor.close()
return data
def predict_for_one_day(start):
"""
    Inspect one day's prediction result and accuracy.
"""
data_list = get_monitor_data_for_one_day(start)
x = np.array(data_list)
y_predict = x @ theta_ne
# y_real = get_emulator_install_data(start)
    # print("predict:", y_predict, "error", abs(y_real-y_predict)/float(y_real))
return int(y_predict)
def get_next_90_day(start):
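    # NOTE: despite its name, this returns the date 91 days *before* `start`;
    # it is used as the lower bound of the ~90-day training window.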
start = datetime.datetime.strptime(start, '%Y-%m-%d')
start_90_before = start - datetime.timedelta(days=91)
return start_90_before.strftime('%Y-%m-%d')
def predict_for_one_day_all_hours(start, hours=None):
"""
    Generate hourly predictions for one day, fitted on the 90 days of data before that day.
"""
# init_monitor_dict = {}
if hours is None:
hours = ["00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17",
"18", "19", "20", "21", "22"]
hour_in_use_list = []
for hour in hours:
init_monitor_dict = {}
hour_in_use_list.append(hour)
# print(hour_in_use_list)
hour_sql = ""
for h in hour_in_use_list:
item = "'%s'," % str(h)
hour_sql += item
hour_sql = hour_sql.strip(",")
# print(hour_sql)
# sql = """
# SELECT HOUR, install_success_user FROM monitor_odps_emulatormonitorstats
# WHERE result_date="%s" AND HOUR in (%s)
# ORDER BY HOUR asc
# """ % (start, hour_sql)
# print (sql)
# generate X
if hour >= "08":
for h in hour_in_use_list:
init_monitor_dict[h] = get_monitor_data_by_date_range_and_hour(h, get_next_90_day(start), start)
# print(init_monitor_dict)
init_monitor_dict["y"] = get_whole_day_result_by_date_range(get_next_90_day(start), start)
df_monitor = pd.DataFrame(init_monitor_dict)
# print(df_monitor)
ones = pd.DataFrame({'ones': np.ones(len(df_monitor))})
data = pd.concat([ones, df_monitor], axis=1) # column concat
            X = data.iloc[:, :-1].values  # this returns an ndarray, not a matrix
y = np.array(df_monitor.iloc[:, -1])
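            # Closed-form least squares via the normal equations:
            # theta = (X^T X)^{-1} X^T y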
theta_ne = lr.normal_equations(X, y)
# print(theta_ne)
data_list = get_monitor_data_for_one_day_with_hour_sql(start, hour_sql)
x = np.array(data_list)
y_predict = x @ theta_ne
y_real = get_emulator_install_data(start)
            # print("predict:", y_predict, "error", abs(y_real-y_predict)/float(y_real))
try:
error_rate = abs(y_real - y_predict) / float(y_real)
except Exception as e:
error_rate = 0
print("start:", start, "hour:", hour, "predict:",int(y_predict), "real:",y_real, "error rate:",)
def predict_for_one_day_with_p(start):
"""
    Inspect one day's prediction result and accuracy.
"""
data_list = get_monitor_data_for_one_day(start)
x = np.array(data_list)
y_predict = x @ theta_ne
y_real = get_emulator_install_data(start)
    # print("predict:", y_predict, "error", abs(y_real-y_predict)/float(y_real))
return (int(y_predict), y_real, abs(y_real - y_predict) / float(y_real))
# init_monitor_dict = {}
# try:
# with connection.cursor() as cursor:
# # Execute the SQL query
# sql_date = 'select DISTINCT DATE_FORMAT(result_date, "%Y-%m-%d") as dt from stats_emulator where scope_id=1 and result_date >="2017-09-01" and result_date < "2017-09-30"'
# cursor.execute(sql_date)
# result = cursor.fetchall()
# for i in result:
# # print(i)
# dt_list.append(i["dt"])
# # Autocommit is off by default; an explicit commit() is needed to persist writes
# connection.commit()
# finally:
# pass
# print (dt_list)
# init_monitor_dict["date"] = dt_list
# for hour in ["00", "01", "02", "03", "04", "05", "06", "07", "08"]:
# init_monitor_dict[hour] = get_monitor_data_by_date_range_and_hour(hour, "2017-10-01", "2017-12-10")
#
# init_monitor_dict["y"] = get_whole_day_result_by_date_range("2017-10-01", "2017-12-10")
#
# # print (init_monitor_dict)
#
# df_monitor = pd.DataFrame(init_monitor_dict)
# print(df_monitor)
# print(df_monitor.info())
# connection.close()
# connection_monitor.close()
# df_show = pd.DataFrame({"x":df_monitor["05"], "y":df_monitor["y"]})
# sns.lmplot("01","y", df_monitor)
# # plt.plot(df_monitor["00"], df_monitor["y"])
# plt.show()
# get X
# ones = pd.DataFrame({'ones': np.ones(len(df_monitor))})
# data = pd.concat([ones, df_monitor], axis=1) # column concat
# X = data.iloc[:, :-1].as_matrix() # this return ndarray, not matrix
# y = np.array(df_monitor.iloc[:, -1])
# alpha = 0.01
# theta = np.zeros(X.shape[1])
# theta.shape
#
# epoch = 500
# final_theta, cost_data = lr.batch_gradient_decent(theta, X, y, epoch, alpha=alpha)
# cost_data[-1]
# theta_ne = lr.normal_equations(X, y)
# print(theta_ne)
# print(lr.cost(theta_ne, X, y))
################### Check prediction accuracy at multiple time points within one day
# print(predict_for_one_day_with_p("2017-12-10"))
# print(predict_for_one_day("2017-12-11"))
predict_for_one_day_all_hours("2017-12-12", ["00","01", "02", "03", "04", "05", "06", "07", "08", "09", "10"])
| 32.162252 | 180 | 0.586945 | 1,240 | 9,713 | 4.33629 | 0.159677 | 0.026781 | 0.033476 | 0.020829 | 0.732565 | 0.691464 | 0.616515 | 0.589362 | 0.572252 | 0.533755 | 0 | 0.036799 | 0.278184 | 9,713 | 301 | 181 | 32.269103 | 0.730138 | 0.283332 | 0 | 0.556962 | 0 | 0.006329 | 0.201888 | 0.0146 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056962 | false | 0.012658 | 0.050633 | 0 | 0.164557 | 0.006329 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aae2c2ec4211b67f155b2267755a9235294658fc | 2,855 | py | Python | setup.py | rahulmohan/VariantWorks | 4d22a0a4c3246bf76b3b60f5bb24ec282a6b3e85 | [
"Apache-2.0"
] | null | null | null | setup.py | rahulmohan/VariantWorks | 4d22a0a4c3246bf76b3b60f5bb24ec282a6b3e85 | [
"Apache-2.0"
] | null | null | null | setup.py | rahulmohan/VariantWorks | 4d22a0a4c3246bf76b3b60f5bb24ec282a6b3e85 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2020 NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Python setuptools setup."""
import os
from setuptools import setup, find_packages
def get_verified_absolute_path(path):
"""Verify and return absolute path of argument.
Args:
path : Relative/absolute path
Returns:
Absolute path
"""
installed_path = os.path.abspath(path)
if not os.path.exists(installed_path):
raise RuntimeError("No valid path for requested component exists")
return installed_path
def get_installation_requirments(file_path):
"""Parse pip requirements file.
Args:
file_path : path to pip requirements file
Returns:
list of requirement strings
"""
with open(file_path, 'r') as file:
requirements_file_content = \
[line.strip() for line in file if
line.strip() and not line.lstrip().startswith('#')]
return requirements_file_content
# Get current dir (pyclaragenomics folder is copied into a temp directory
# created by pip)
current_dir = os.path.dirname(os.path.realpath(__file__))
# Classifiers for PyPI
pyaw_classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9"
]
setup(name='variantworks',
version='0.1.0',
      description='NVIDIA genomics python libraries and utilities',
author='NVIDIA Corporation',
url="https://github.com/clara-parabricks/VariantWorks",
include_package_data=True,
install_requires=[get_installation_requirments(
get_verified_absolute_path(
os.path.join(current_dir, 'requirements.txt')))
],
packages=find_packages(where=current_dir, include=["variantworks*"]),
python_requires='>=3.7',
long_description='Python libraries and utilities for manipulating '
'genomics data',
classifiers=pyaw_classifiers,
platforms=['any'],
)
| 30.698925 | 75 | 0.685114 | 347 | 2,855 | 5.533141 | 0.512968 | 0.03125 | 0.065104 | 0.067708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011126 | 0.21296 | 2,855 | 92 | 76 | 31.032609 | 0.843347 | 0.33275 | 0 | 0 | 0 | 0 | 0.354839 | 0.012028 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.046512 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aae33bff89e2c83cc0677f22cea3f3c9b6efd1fe | 5,924 | py | Python | galpy/df/eddingtondf.py | davidhendel/galpy | 9654e2e181d26abaac4a4fba49375887fb290d36 | [
"BSD-3-Clause"
] | null | null | null | galpy/df/eddingtondf.py | davidhendel/galpy | 9654e2e181d26abaac4a4fba49375887fb290d36 | [
"BSD-3-Clause"
] | null | null | null | galpy/df/eddingtondf.py | davidhendel/galpy | 9654e2e181d26abaac4a4fba49375887fb290d36 | [
"BSD-3-Clause"
] | null | null | null | # Class that implements isotropic spherical DFs computed using the Eddington
# formula
import numpy
from scipy import interpolate, integrate
from ..util import conversion
from ..potential import evaluateR2derivs
from ..potential.Potential import _evaluatePotentials, _evaluateRforces
from .sphericaldf import isotropicsphericaldf, sphericaldf
class eddingtondf(isotropicsphericaldf):
"""Class that implements isotropic spherical DFs computed using the Eddington formula
.. math::
f(\\mathcal{E}) = \\frac{1}{\\sqrt{8}\\,\\pi^2}\\,\\left[\\int_0^\\mathcal{E}\\mathrm{d}\\Psi\\,\\frac{1}{\\sqrt{\\mathcal{E}-\\Psi}}\\,\\frac{\\mathrm{d}^2\\rho}{\\mathrm{d}\\Psi^2} +\\frac{1}{\\sqrt{\\mathcal{E}}}\\,\\frac{\\mathrm{d}\\rho}{\\mathrm{d}\\Psi}\\Bigg|_{\\Psi=0}\\right]\\,,
where :math:`\\Psi = -\\Phi+\\Phi(\\infty)` is the relative potential, :math:`\\mathcal{E} = \\Psi-v^2/2` is the relative (binding) energy, and :math:`\\rho` is the density of the tracer population (not necessarily the density corresponding to :math:`\\Psi` according to the Poisson equation). Note that the second term on the right-hand side is currently assumed to be zero in the code."""
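    # A minimal usage sketch (the potential choice and sample size below are
    # illustrative assumptions, not part of this module):
    #   from galpy.potential import HernquistPotential
    #   dfh = eddingtondf(pot=HernquistPotential(amp=2., a=1.3))
    #   orbits = dfh.sample(n=100)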
def __init__(self,pot=None,denspot=None,rmax=1e4,
scale=None,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
Initialize an isotropic distribution function computed using the Eddington inversion
INPUT:
pot= (None) Potential instance or list thereof that represents the gravitational potential (assumed to be spherical)
denspot= (None) Potential instance or list thereof that represent the density of the tracers (assumed to be spherical; if None, set equal to pot)
rmax= (1e4) when sampling, maximum radius to consider (can be Quantity)
ro=, vo= galpy unit parameters
OUTPUT:
None
HISTORY:
2021-02-04 - Written - Bovy (UofT)
"""
isotropicsphericaldf.__init__(self,pot=pot,denspot=denspot,rmax=rmax,
scale=scale,ro=ro,vo=vo)
self._dnudr= self._denspot._ddensdr \
if not isinstance(self._denspot,list) \
else lambda r: numpy.sum([p._ddensdr(r) for p in self._denspot])
self._d2nudr2= self._denspot._d2densdr2 \
if not isinstance(self._denspot,list) \
else lambda r: numpy.sum([p._d2densdr2(r) for p in self._denspot])
self._potInf= _evaluatePotentials(pot,self._rmax,0)
self._Emin= _evaluatePotentials(pot,0,0)
# Build interpolator r(pot)
r_a_values= numpy.concatenate(\
(numpy.array([0.]),
numpy.geomspace(1e-6,1e6,10001)))
self._rphi= interpolate.InterpolatedUnivariateSpline(\
[_evaluatePotentials(self._pot,r*self._scale,0)
for r in r_a_values],r_a_values*self._scale,k=3)
def sample(self,R=None,z=None,phi=None,n=1,return_orbit=True):
# Slight over-write of superclass method to first build f(E) interp
# No docstring so superclass' is used
if not hasattr(self,'_fE_interp'):
Es4interp= numpy.hstack((numpy.geomspace(1e-8,0.5,101,
endpoint=False),
sorted(1.-numpy.geomspace(1e-4,0.5,101))))
Es4interp= (Es4interp*(self._Emin-self._potInf)+self._potInf)[::-1]
fE4interp= self.fE(Es4interp)
iindx= numpy.isfinite(fE4interp)
self._fE_interp= interpolate.InterpolatedUnivariateSpline(\
Es4interp[iindx],fE4interp[iindx],
k=3,ext=3)
return sphericaldf.sample(self,R=R,z=z,phi=phi,n=n,
return_orbit=return_orbit)
def fE(self,E):
"""
NAME:
fE
        PURPOSE:
Calculate the energy portion of a DF computed using the Eddington inversion
INPUT:
E - The energy (can be Quantity)
OUTPUT:
fE - The value of the energy portion of the DF
HISTORY:
2021-02-04 - Written - Bovy (UofT)
"""
Eint= conversion.parse_energy(E,vo=self._vo)
out= numpy.zeros_like(Eint)
indx= (Eint < self._potInf)*(Eint >= self._Emin)
# Split integral at twice the lower limit to deal with divergence at
# the lower end and infinity at the upper end
out[indx]= numpy.array([integrate.quad(
lambda t: _fEintegrand_smallr(t,self._pot,tE,
self._dnudr,self._d2nudr2,
self._rphi(tE)),
0.,numpy.sqrt(self._rphi(tE)),
points=[0.])[0] for tE in Eint[indx]])
out[indx]+= numpy.array([integrate.quad(
lambda t: _fEintegrand_larger(t,self._pot,tE,
self._dnudr,self._d2nudr2),
0.,0.5/self._rphi(tE))[0] for tE in Eint[indx]])
return -out/(numpy.sqrt(8.)*numpy.pi**2.)
def _fEintegrand_raw(r,pot,E,dnudr,d2nudr2):
# The 'raw', i.e., direct integrand in the Eddington inversion
Fr= _evaluateRforces(pot,r,0)
return (Fr*d2nudr2(r)
+dnudr(r)*evaluateR2derivs(pot,r,0,use_physical=False))\
/Fr**2.\
/numpy.sqrt(_evaluatePotentials(pot,r,0)-E)
def _fEintegrand_smallr(t,pot,E,dnudr,d2nudr2,rmin):
# The integrand at small r, using transformation to deal with sqrt diverge
return 2.*t*_fEintegrand_raw(t**2.+rmin,pot,E,dnudr,d2nudr2)
def _fEintegrand_larger(t,pot,E,dnudr,d2nudr2):
# The integrand at large r, using transformation to deal with infinity
return 1./t**2*_fEintegrand_raw(1./t,pot,E,dnudr,d2nudr2)
| 43.240876 | 394 | 0.598413 | 748 | 5,924 | 4.625668 | 0.300802 | 0.019075 | 0.013006 | 0.023121 | 0.247688 | 0.216763 | 0.16763 | 0.115607 | 0.098266 | 0.07052 | 0 | 0.027002 | 0.28106 | 5,924 | 136 | 395 | 43.558824 | 0.785396 | 0.35449 | 0 | 0.031746 | 0 | 0 | 0.002833 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.095238 | 0.031746 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a9a569ddf00b5c148463eda6bba12fb4a108370 | 1,121 | py | Python | 714_max_profit.py | xinming365/LeetCode | e56097a60ddd1b5ddba7f15a726661c2aa6633e7 | [
"Apache-2.0"
] | null | null | null | 714_max_profit.py | xinming365/LeetCode | e56097a60ddd1b5ddba7f15a726661c2aa6633e7 | [
"Apache-2.0"
] | null | null | null | 714_max_profit.py | xinming365/LeetCode | e56097a60ddd1b5ddba7f15a726661c2aa6633e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2022/3/17 11:52 AM
# @Author : xinming
# @File : 714_max_profit.py
from typing import List
class Solution2:
def maxProfit(self, prices: List[int], fee: int) -> int:
n = len(prices)
buy = prices[0] + fee
profit = 0
for i in range(1, n):
            # Account for the transaction fee from here on
if prices[i]+fee<buy:
buy=prices[i]+fee
elif prices[i]>buy:
profit += (prices[i]-buy)
buy=prices[i]
return profit
class Solution:
def maxProfit(self, prices: List[int], fee: int) -> int:
        # dp[i][0]: max profit on day i when not holding the stock
        # dp[i][1]: max profit on day i when holding the stock
n = len(prices)
dp = [[0 for i in range(2)] for i in range(n)]
dp[0][0]=0
dp[0][1]=-prices[0]
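        # Transition: keep yesterday's state, or sell today (paying the fee)
        # to leave the holding state, or buy today to enter it.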
for i in range(1, n):
dp[i][0]=max(dp[i-1][0], dp[i-1][1]+prices[i]-fee)
dp[i][1]= max(dp[i-1][0]-prices[i], dp[i-1][1])
return dp[n-1][0]
if __name__=='__main__':
prices = [1,3,7,5,10,3]
out = Solution2().maxProfit(prices=prices, fee=3)
print(out)
| 27.341463 | 62 | 0.49777 | 176 | 1,121 | 3.113636 | 0.329545 | 0.043796 | 0.043796 | 0.080292 | 0.240876 | 0.189781 | 0.189781 | 0.138686 | 0.138686 | 0 | 0 | 0.067282 | 0.323818 | 1,121 | 40 | 63 | 28.025 | 0.655673 | 0.149866 | 0 | 0.222222 | 0 | 0 | 0.008466 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.037037 | 0 | 0.259259 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a9d1513ed786c1ca051d9689a094e7314b0815d | 15,503 | py | Python | script.py | khttemp/dend-comic-script | 0abbb2ab7f418c72924e68bb1db61c7879422609 | [
"MIT"
] | null | null | null | script.py | khttemp/dend-comic-script | 0abbb2ab7f418c72924e68bb1db61c7879422609 | [
"MIT"
] | null | null | null | script.py | khttemp/dend-comic-script | 0abbb2ab7f418c72924e68bb1db61c7879422609 | [
"MIT"
] | null | null | null | import struct
import os
import sys
cmd = [
"Tx",
"TxSize",
"Alpha",
"End",
"Pos",
"ColorALL",
"Move",
"STAGE_BGM",
"SetFlat3D",
"ChangeFlat3D",
"SetCamDir",
"DisCamDir",
"Set3DObj",
"SetWAngleX",
"SetWAngleY",
"SetWAngleZ",
"SetLAngleX",
"SetLAngleY",
"SetLAngleZ",
"SetBoneWAngleX",
"SetBoneWAngleY",
"SetBoneWAngleZ",
"SetBoneLAngleX",
"SetBoneLAngleY",
"SetBoneLAngleZ",
"ShowMesh",
"HideMesh",
"PlayAnime",
"Length_End",
"SetScall",
"RACE_START",
"RACE_END",
"FADE_STAGE_BGM",
"CHANGE_SCENE",
"LPos",
"LMove",
"LLoopX",
"LLoopY",
"LLoopZ",
"Angle",
"AngleLoop",
"Move2",
"PosX",
"PosY",
"PosZ",
"PlaySE",
"SET_MT_NONE",
"SetCamPos",
"SetCamTarget",
"CamMoveWait",
"SetComic",
"ComicPos",
"ComicAlpha",
"ComicWait",
"Scene_to_Comic",
"SKY_DOME",
"Fill_BG",
"ComicEnd",
"CamComtroll",
"ComicSceneStop",
"BtnWait",
"EyeMove",
"SetZoom",
"BG_Alpha",
"BG_Wait",
"StartCount",
"WaitMoveEye",
"WaitFrame",
"FTV_Play",
"FTV_Wait",
"HideMsgWnd",
"FTV_End",
"SkipEventPoint",
"SkipEventFlg",
"PlayComicSE",
"StopComicSE",
"PlayComicBGM",
"StopComicBGM",
"VolComicBGM",
"HideALLComic",
"Stage_BGM_Vol",
"SET_CPU_FLG",
"SET_CPU_MODE",
"CHK_LENGTH",
"END_CHK_LENGTH",
"CHK_POSTION",
"END_CHK_POSTION",
"WAIT_MOTION",
"END_WAIT_MOTION",
"CHANGE_SPEED",
"CHANGE_CAM_TYPE",
"Set2P",
"CharChk_and_Tx",
"ChangeR",
"ChangeG",
"ChangeB",
"ChangeColor",
"SetGray",
"MoveX",
"MoveY",
"MoveZ",
"SetUV_X",
"RePlay",
"IsStart",
"ShowGoal",
"CHK_WIN_TRAIN",
"END_CHK_WINTRAIN",
"N_ADD_OBJ",
"N_POS",
"START_TIME_LINE",
"N_MOVE",
"WAIT_TIME_LINE",
"N_DEL_OBJ",
"SCREEN_FADE",
"N_CHANGE_ANIME",
"TRAIN_SPEED",
"TRAIN_FLG",
"SCENE_LIGHT",
"CHANGE_CAM_LENGTH",
"CHANGE_CAM_DIRX",
"CHANGE_CAM_DIRY",
"CHANGE_CAM_DIRZ",
"R_Drift",
"L_Drift",
"IS_TRAIN_HIT",
"TO_RAIL",
"SLEEP_TRAIN",
"RandWAngle",
"RandMove",
"ADD_OBJ",
"START_COMIC",
"SetRand3DObj",
"Offset3DObj",
"RandPos",
"RandPlaySE",
"RandAngleX",
"RandAngleY",
"RandAngleZ",
"CHK_TRAIN_STATE",
"END_CHK_TRAIN_STATE",
"CHK_TRAIN_SPEED_U",
"CHK_TRAIN_SPEED_D",
"END_CHK_TRAIN_SPEED_U",
"END_CHK_TRAIN_SPEED_D",
"ChkStory_and_Tx",
"ClearStory_and_Tx",
"N_L_ANGLE_X",
"N_L_ANGLE_Y",
"N_L_ANGLE_Z",
"Comic_Glay",
"N_MoveMesh_X",
"N_MoveMesh_Y",
"N_MoveMesh_Z",
"SetComic_Blur",
"SetComic_Blur_Speed",
"TRACK_BOMB",
"Hide_Sky_Doom",
"ADD_POINT",
"CHK_POINT",
"ELSE_CHK_POINT",
"ELSE_IF_CHK_POINT",
"END_CHK_POINT",
"GOTO_SCRIPT",
"SHEAK_COMIC",
"STORY_OPEN",
"STORY_CLEAR",
"CHAR_OPEN",
"SAVE_GAME",
"KEISUKE_COUNT",
"RandPlayComicSE",
"TITLE_MODE",
"GOING",
"RAND_IF",
"ELSE_RAND_IF",
"END_RAND_IF",
"CHK_SP_BREAK",
"END_CHK_SP_BREAK",
"CHK_DRIFT",
"END_CHK_DRIFT",
"ENDING_MODE",
"ChkCause_and_Tx",
"SET_DRAW_TYPE",
"To_TxSize",
"OPEN_CAUSE",
"DIS_TRAIN_SPEED",
"CHK_RACE_TIME",
"END_CHK_RACE_TIME",
"End_Comic",
"WAIT_RAIL",
"END_WAIT_RAIL",
"COMIC_SCALE",
"USO_COUNT",
"WaitRandPlaySE",
"FROM",
"GOTO",
"CHK_TRAIN_TYPE",
"RAND_IF_AVG",
"CHK_NOTCH",
"WAIT_RAIL_ONLY",
"ONE_TRACK_DRIFT",
"LAST_STATION",
"OSSAN",
"SET_TAIL_SCALE",
"OPEN_HUTA",
"SET_GN",
"MDL_GETINDEX",
"INDEX_BONE_ROT_X",
"INDEX_BONE_ROT_Y",
"INDEX_BONE_ROT_Z",
"INDEX_BONE_L_ROT_X",
"INDEX_BONE_L_ROT_Y",
"INDEX_BONE_L_ROT_Z",
"CREATE_INDEX",
"IB_LI_CREATE_ROT_X",
"IB_LI_CREATE_ROT_Y",
"IB_LI_CREATE_ROT_Z",
"IB_LI_SET_ROT_X",
"IB_LI_SET_ROT_Y",
"IB_LI_SET_ROT_Z",
"IB_LI_SET_LOOP_X",
"IB_LI_SET_LOOP_Y",
"IB_LI_SET_LOOP_Z",
"ADD_MY_OBJ",
"INDEX_BONE_L_POS_X",
"INDEX_BONE_L_POS_Y",
"INDEX_BONE_L_POS_Z",
"IB_LI_CREATE_L_POS_X",
"IB_LI_CREATE_L_POS_Y",
"IB_LI_CREATE_L_POS_Z",
"IB_LI_SET_L_POS_X",
"IB_LI_SET_L_POS_Y",
"IB_LI_SET_L_POS_Z",
"FROM_ADDMT",
"MOVE_UV_X",
"MOVE_UV_Y",
"CREATE_UV_MOVE_X",
"IB_LI_SET_LOOP_LPOSX",
"IB_LI_SET_LOOP_LPOSY",
"IB_LI_SET_LOOP_LPOSZ",
"RELEASE_ALL_IB_LIST",
"ADD_MY_OBJ_INDEX",
"TO_TAGET_POS",
"ATK_HIT",
"ATK_END",
"SET_RELEASE_PARAM",
"CREATE_LENSFLEAR",
"SET_LENSFLEAR_PARAM",
"SET_LENSFLEAR_MT",
"RAIL_POS_TO_BUFF",
"BUFF_TO_CAM_POS",
"BUFF_TO_TARGET_POS",
"FTV_BASE_PROC",
"FTV_NEXT_PROC",
"MDL_INDEX_TO_VIEW",
"SET_FOG_LENGTH",
"SET_UV_MOVE_X",
"SET_UV_LOOP_X",
"CREATE_MESH_INDEX",
"SET_MESH_INDEX",
"INDEX_BONE_L_ADD_ROT_X",
"INDEX_BONE_L_ADD_ROT_Y",
"INDEX_BONE_L_ADD_ROT_Z",
"CHANGE_SCALL",
"CHK_CLEAR_STORY",
"CHK_OPEN_STORY",
"SET_LENSFLEAR_ALL_FLG",
"CHK_USE_CHAR",
"SET_OBJ_FOG_NO",
"SET_OBJ_RENDER_ID",
"PLAY_STAGE_BGM",
"CHANGE_TRAIN_FOG",
"FIRST_OBJ_SET_ANIME",
"SET_CAMPOINT_2P2C",
"SET_CAMPOINT_1P2C",
"CAM_POINT_PER",
"CAM_TARGET_PER",
"SET_CAM_POINT_LENGTH",
"SET_CAM_OFFSET",
"START_WIPER",
"CREATE_TRAIN_ORG",
"ORG_SET_RAIL",
"ORG_ADD",
"SET_CAMPOINT_K",
"ORG_SET_POS",
"ORG_SET_FOG",
"ORG_RELEASE",
"PLAY_FTV_END",
"CNG_TRAIN_MAT_COL",
"CNG_ORG_MAT_COL",
"IS_CAUTION",
"ENDWAIT_COMIC",
"SET_COMIC_BG_COLOR",
"TX_2_TRAIN",
"CHANGE_MT_COL_TRAIN",
"CNG_MT_COL",
"RETURN",
"ReLoadSE",
"BASE_POINT_CAM",
"STOP_3D",
"STOP_STAGE_BGM",
"TRAIN_UD",
"SET_CAM_TARGET_OFFSET",
"SET_CAM_POINT_1T_ROT",
"SET_CAM_T_LENGHT",
"SET_CAM_T_ROT_X",
"SET_CAM_T_ROT_Y",
"SET_CAM_T_OFFSET",
"NO_OUTRUN",
"SET_WHEEL_FIRE",
"RELOAD_OP_TRAIN",
"BackR_Drift",
"BackL_Drift",
"CHK_MOTION",
"ORG_SET_STYLE_POS",
"RECREATE_TRAIN",
"SET_CAMPOINT_1P2T",
"BUFF_TO_SC_CAM_POS",
"SC_ORG_MODE_CHANGE",
"SC_ORG_INIT_POS",
"SC_ORG_SET_POS",
"SC_ORG_SET_ROT",
"SC_ORG_SET_X_ROT",
"SC_ORG_SET_Y_ROT",
"SC_ORG_SET_Z_ROT",
"SET_SC_KOTEI_CAM_POS",
"SET_SC_KOTEI_CAM_T_POS",
"START_SC_WIPER",
"SUPER_DRIFT",
"CNG_TRAIN_NO_MAT_COL",
"ERR_CMD",
"K_HN",
"TO_TRACK_RAIL",
"IS_NO_DRAMA",
"CNG_TRAIN_NO_MAT_RGBA",
"SHOW_RECORD",
"WAIT_RECORD_END",
"IB_LI_SET_UPDATE_FLG",
"PTCL_SCALL",
"PTCL_COLOR",
"PTCL_ALPHA",
"PTCL_DRAWTYPE",
"PTCL_ANGLE",
"PTCL_RAND_ANGLE",
"PTCL_RAND_COLOR",
"PTCL_RAND_ALPHA",
"PTCL_RAND_SCALL",
"IB_ADD_PTCL",
"PTCL_RAND_TONE_COLOR",
"IS_ALPHA_END",
"PTCL_L_POS",
"PTCL_RAND_L_POS",
"CREATE_MAT_COLOR_R_INTERLIST",
"CREATE_MAT_EMISSIVE_R_INTERLIST",
"SET_MAT_COLOR_R",
"SET_MAT_COLOR_G",
"SET_MAT_COLOR_B",
"SET_MAT_COLOR_LOOP",
"SET_MAT_EMISSIVE_R",
"SET_MAT_EMISSIVE_G",
"SET_MAT_EMISSIVE_B",
"SET_MAT_EMISSIVE_LOOP",
"CREATE_MAT_COLOR_G_INTERLIST",
"CREATE_MAT_EMISSIVE_G_INTERLIST",
"CREATE_MAT_COLOR_B_INTERLIST",
"CREATE_MAT_EMISSIVE_B_INTERLIST",
"CREATE_UV_MOVE_Y",
"SET_UV_MOVE_Y",
"SET_UV_LOOP_Y",
"INDEX_RAND_ROT_X",
"INDEX_RAND_ROT_Y",
"INDEX_RAND_ROT_Z",
"INDEX_RAND_POS_X",
"INDEX_RAND_POS_Y",
"INDEX_RAND_POS_Z",
"RAND_SHOW_MESH",
"INDEX_RAND_SCALL",
"ADD_CHILD_OBJ",
"ADD_OBJ_INDEX",
"GAS_TARBIN",
"ENGINE_START",
"CHANGE_CHILDOBJ_ANIME",
"IB_SET_W_MT",
"CHK_OBJ_PARAM",
"SET_OBJ_PARAM",
"INDEX_DIR_CAM",
"CNG_MT_LIGHT",
"ADD_OBJ_INDEX2",
"CNG_MT_ALPHA",
"CREATE_MAT_ALPHA_INTERLIST",
"SET_MAT_ALPHA",
"RESTART_MESH_LIST",
"RAIL_ANIME_CHANGE",
"STOP_COMIC_SE_ALL",
"HURIKO",
"FTV_PLAY_AND_PREV",
"FTV_END_INHERIT",
"STATION_NAME_PRIORITY",
"ALL_FIT",
"SWAP_TX",
"CNG_TX",
"CHK_CAUSE",
"CNG_ANIME",
"CHK_OUHUKU",
"SET_TRAIN_PTCL_AREA",
"WAIT_DOSAN_LENGTH",
"END_DOSAN_LENGTH",
"DOSANSEN",
"MESH_INDEX_SE_UV_ANIME_FLG",
"WEATHER",
"TRAIN_DIR",
"IS_USE_CHAR",
"QUICK_SAVE_EVENT",
"NONE_GOAL",
"ENGINE_STOP",
"IS_BTL_MODE",
"IS_FREE_MODE",
"FIRST_OBJ_SET_ANIME_SCENE",
"G_HIDE_MESH",
"G_SHOW_MESH",
"STOP_WIPER",
"TRAIN_ANIME_CHANGE",
"MESH_INDEX_UV_RESTRT",
"SET_COMIC_COLOR",
"CHK_OUTRUN_CNT",
"CHK_D_AND_NOTCH",
"ADD_CPU_LEN_OUTRUN",
"ADD_CPU_SPEED_D_AND_NOTCH",
"CHK_HIT_CNT",
"TOP_SPEED_HOSYO",
"SET_ROOT_BLOCK",
"RIFT",
"COLLISION",
"DIR_VIEW_CHANGE",
"CHK_RAIL_NO",
"TRACK_CHANGE",
"CHK_LENGTH_DIR",
"CHK_POS_DIR",
"TRUE_CLASH",
"KATARIN_RUN",
"DRAW_UI",
"STOP_SCRIPT_BGM",
"SET_STATION_NO",
"SET_CPU_BREAKE",
"AMB_ANIME",
"ONE_DRIFT_FALSE",
"L_One_Drift",
"R_One_Drift",
"Ret_One_Drift",
"FRONT_JUMP",
"REAR_JUMP",
"FRONT_MOVE_X",
"TRACK_MOVE",
"TRAIN_JUMP",
"SET_LIGHT",
"SET_COL_KASENCHU",
"SET_KAISO",
"SET_FOR",
"CHK_TRAIN_COL",
"VOL_SCRIPT_BGM",
"IF_NOTCH",
"SET_BRIND_SW",
"SET_MIKOSHI",
"ADD_FIRE",
"BREAKE_OR_HIT",
"OUTRUN",
"SOFT_ATK",
"RAIL_STOP",
"CHANGE_OUHUKU_LINE",
"BRIND_ATK",
"OPEN_POS_DLG",
"PLAY_STAGEBGM_BLOCK",
"SET_BTL_POINT",
"CAM_TRAIN",
"PLAY_SCRIPT_BGM",
"CNG_FOR",
"SET_RAILBLOCK_CHECKER",
"RAIN_SE",
"TRAIN_STOP",
"KOTEICAM_BLEND",
"SCRIPT_RAIN",
"LINE_CHANGE",
"WAIT_RAIL_MORE_ONLY",
"SET_SE_VOL",
"CAM_TARGET_TRACK",
"DECAL_D37",
"DECAL_D39",
"DECAL_SMOKE",
"RAIL_PRIORITY",
"GET_KEY",
"SHOW_LIGHT",
"SHOW_IN_LIGHT",
"FOG_POW",
"STORY_WIN",
"RAIN_PARTICLE",
"D39_FIRE",
"SET_CPU_SPEED",
"BODY_AUDIO_PLAY",
"BODY_AUDIO_STOP",
"CNG_FADE_SPRITE",
"RAIL_DRIFT_CHK",
"INQ_WAIT",
"CNG_SCCAM_TRAIN",
"STOP_TRAIN_SE",
"PLAY_SCRIPT_BGM_TIME",
"CNG_BODY_COLOR",
"LOAD_TRAIN",
"SHOW_BLOCK",
"UPDATE_LIGHT_FRARE",
"WAIT_RAIL_MORE_GOTO",
"CREATE_AURA",
"AURA_ALPHA",
"SET_LV_JUMP",
"CREATE_EFFECT_CAM",
"TO_EFFECT_CAM",
"EFFECT_CAM_POW",
"EFFECT_CAM_COLOR",
"EFFECT_CAM_ALPHA",
"HIDE_LIGHT",
"USE_EFFECT_CAM",
"USE_EFFECT_CAM_RGB",
"EFFECT_CAM_RGB",
"COPY_TRAIN_POS",
"COL_SET",
"CNG_CPU_TRAIN",
"BTN_GOTO",
"NO_TIMESCALE_KOMA",
"EFFCAM_NOIZE",
"EFFCAM_GRI",
"EFFCAM_BLOCKNOISE",
"CREATE_TQ5000_FLAGMENT",
"USE_TQ5000_FLAGMENT",
"TQ5000_FLAGPOS",
"HUMIKIRI_VOL",
"TO_EFFECT_CAM_BODY",
"TO_NORM_CAM",
"TO_920",
"NO_TIMESCALE_FVT",
"CNG_TARGET_BODY",
"SC_ADD_POINT",
"CHK_SC_POINT",
"KAISO_TO_DUEL",
"SHOW_ST",
"ORG_UPDATE",
"SET_RAILBLOCK_POS",
"SET_LIGHT_OVER",
"CREATE_STAFFROLL",
"STAFFROLL_START",
"WAIT_STAFFROLL",
"SC_OUTRUN",
"CREATE_TAKMIS",
"SET_TAKMIS_POS",
"SET_TAKMIS_ALPHA",
"FRONT_DOOR",
"SET_KOMA_DEPTH",
"D37_FIRE",
"AMB_HIT_WAIT",
"ShowRecord",
"FIT_PER",
"CREATE_COMIC_PC",
"SET_COMIC_PC",
"PAUSE_STAGE_BGM",
"SET_KAKAPO",
"KOMA_KAKAPO",
"START_TARBINE",
"END_TARBINE",
"TARBINE_FTV_START",
"TARBINE_FTV_END",
"STORY_ENGINE",
"RAND_GOTO",
"KQ_SOUND",
"STORY_GOTO",
"PLAY223HONE",
"RB26",
"PLAYORGSE",
"H2300_GOAL",
"SCRIPT_CMD_MAX"
]
slowFlag = False
print("DEND COMIC SCRIPT ver2.0.1...")
file = input("Enter the comic bin file name: ")
try:
try:
f = open(file, "rb")
line = f.read()
f.close()
except FileNotFoundError:
errorMsg = "指定されたファイルが見つかりません。終了します。"
print(errorMsg)
input()
sys.exit()
print("見つけました!")
    slow = input("Read ReadComicData one line at a time? (Y/N): ")
while True:
if slow == "Y" or slow == "y":
slowFlag = True
break
elif slow == "N" or slow == "n":
slowFlag = False
break
else:
            slow = input("Input error! Please enter again: ")
size = len(line)
index = 16
header = line[0:index]
if header != b'DEND_COMICSCRIPT':
raise Exception
index += 1
print()
#ReadComicImg
print("ReadComicImg...")
imgCnt = line[index]
index += 1
for i in range(imgCnt):
b = line[index]
index += 1
text = line[index:index+b].decode()
print("{0} -> {1}".format(i, text))
index += b
print()
#ReadComicSize
print("ReadComicSize...")
b = line[index]
index += 1
for i in range(b):
index += 1
print("{0} -> ".format(i), end="")
for j in range(4):
text = line[index:index+4]
f = struct.unpack("<f", text)[0]
print(f, end=", ")
index += 4
print()
print()
#ReadSE
print("ReadSE...")
secnt = line[index]
index += 1
for i in range(secnt):
b = line[index]
index += 1
text = line[index:index+b].decode()
print("{0} -> {1}".format(i, text))
index += b
index += 1
print()
#ReadBGM
print("ReadBGM...")
bgmcnt = line[index]
index += 1
for i in range(bgmcnt):
b = line[index]
index += 1
text = line[index:index+b].decode()
print("{0} -> {1}".format(i, text))
index += b
index += 1
index += 4
index += 4
print()
#ReadComicData
print("ReadComicData...")
index += 1
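    # struct format "<H" is a little-endian unsigned 16-bit int;
    # "<f" (used below) is a little-endian 32-bit float.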
num = struct.unpack("<H", line[index:index+2])[0]
index += 2
count = 0
for i in range(num):
if index >= size:
errorMsg = "注意!設定したコマンド数({0})は、書き込んだコマンド数({1})より多く読もうとしています。".format(num, count)
print(errorMsg)
input()
sys.exit()
print("No.{0} -> index({1})".format(i, hex(index)))
num2 = struct.unpack("<H", line[index:index+2])[0]
index += 2
if num2 < 0 or num2 >= len(cmd)-1:
errorMsg = "定義されてないコマンド番号です({0})。読込を終了します。".format(num2)
print(errorMsg)
input()
sys.exit()
print("cmd -> {0}({1})".format(cmd[num2], num2))
b = line[index]
index += 1
if b >= 16:
print("script Error!")
b = 16
print("cmd_cnt -> {0}".format(b))
array = []
for j in range(b):
array.append(struct.unpack("<f", line[index:index+4])[0])
index += 4
print("cmd_param -> {0}".format(array))
if slowFlag:
input()
else:
print()
count += 1
if index < size:
errorMsg = "注意!設定したコマンド数({0})は、書き込んだコマンド数より少なく設定されています".format(count)
print(errorMsg)
else:
print("正常に読み込みできました。終了します。")
input()
sys.exit()
except Exception as e:
errorMsg = "電車でDのコミックスクリプトではない、またはファイルが壊れた可能性があります。"
print(errorMsg)
sys.exit()
| 21.178962 | 92 | 0.582855 | 1,950 | 15,503 | 4.138462 | 0.243077 | 0.009418 | 0.026022 | 0.01487 | 0.097026 | 0.066295 | 0.055143 | 0.055143 | 0.034325 | 0.034325 | 0 | 0.010152 | 0.269303 | 15,503 | 731 | 93 | 21.207934 | 0.702242 | 0.00329 | 0 | 0.090652 | 0 | 0 | 0.504823 | 0.052761 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.004249 | 0 | 0.004249 | 0.042493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2a9e13aa9fcd7da01249fb6480b7c3da53e466c9 | 1,649 | py | Python | FileScan.py | limijd/fileSync | 4e519f87e2c4737cc8954d374e75c80ae26cfd88 | [
"Apache-2.0"
] | null | null | null | FileScan.py | limijd/fileSync | 4e519f87e2c4737cc8954d374e75c80ae26cfd88 | [
"Apache-2.0"
] | null | null | null | FileScan.py | limijd/fileSync | 4e519f87e2c4737cc8954d374e75c80ae26cfd88 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0703,R0902,R1711,R0912,R0914,R0911
import os
import sys
import re
import argparse
import logging
import scandir
class FileScan:
def __init__(self, args=None):
pass
def scan(self, directory, all_files=None, file_types=None, all_fns=None):
logging.debug("Scan %s ...", directory)
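        # None defaults (instead of {}) avoid Python's shared mutable
        # default-argument pitfall; fresh dicts are created per top-level call.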
        if all_files is None:
            all_files = {}
        if file_types is None:
            file_types = {}
        if all_fns is None:
            all_fns = {}
for entry in os.scandir(directory):
if entry.is_file():
ty = os.path.splitext(entry.name)[1]
stat = entry.stat()
if ty in file_types:
count, sz_count,files = file_types[ty]
sz_count = sz_count + stat.st_size
files.append(entry.path)
file_types[ty] = [count+1, sz_count, files]
else:
file_types[ty] = [1, stat.st_size, [entry.path] ]
all_files[entry.path] = [entry.name, entry.path, entry.stat(), entry.is_symlink(), None]
if entry.name in all_fns:
all_fns[entry.name].append(entry.path)
else:
all_fns[entry.name] = [entry.path]
elif entry.is_dir():
self.scan(entry.path, all_files, file_types, all_fns)
return all_files, file_types, all_fns
if __name__ == "__main__":
fs = FileScan()
    all_files, file_types, all_fns = fs.scan(sys.argv[1])
for k,v in file_types.items():
print(k,v)
| 31.711538 | 104 | 0.545179 | 212 | 1,649 | 4.018868 | 0.320755 | 0.116197 | 0.065728 | 0.059859 | 0.053991 | 0.053991 | 0 | 0 | 0 | 0 | 0 | 0.031164 | 0.338387 | 1,649 | 51 | 105 | 32.333333 | 0.749771 | 0.061249 | 0 | 0.04878 | 0 | 0 | 0.012306 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0.02439 | 0.146341 | 0 | 0.243902 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aa0d0d3c9032bc1251f46e72b76ee9963ad6ca4 | 554 | py | Python | naming.py | AdamPI314/sensitivity_analysis | 719ef83643e39580626e69df3bfeb0f60ec882b2 | [
"MIT"
] | 1 | 2018-11-20T09:18:04.000Z | 2018-11-20T09:18:04.000Z | naming.py | AdamPI314/sensitivity_analysis | 719ef83643e39580626e69df3bfeb0f60ec882b2 | [
"MIT"
] | null | null | null | naming.py | AdamPI314/sensitivity_analysis | 719ef83643e39580626e69df3bfeb0f60ec882b2 | [
"MIT"
] | null | null | null | """
sensitivity analysis naming
"""
def index_transition(n_dim, exclude=None):
"""
    suppose original list is [0, 1, ..., n_dim - 1]
exclude = [0, 2]
return two dictionaries,
1), original to new dictionary
2), and new to original dictionary
"""
o_2_n = dict()
n_2_o = dict()
counter = 0
for i in range(n_dim):
if i not in exclude:
o_2_n[i] = counter
n_2_o[counter] = i
counter += 1
else:
continue
return o_2_n, n_2_o
| 21.307692 | 51 | 0.519856 | 78 | 554 | 3.487179 | 0.461538 | 0.044118 | 0.033088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043605 | 0.379061 | 554 | 25 | 52 | 22.16 | 0.747093 | 0.32852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aa158de6b1b4e20fca39503d35f5462fdbea57b | 10,893 | py | Python | bsq.py | jarethholt/polyTEOS10 | e034fb4d35bd02b2ea640daad79cb78c8b493a8c | [
"MIT"
] | null | null | null | bsq.py | jarethholt/polyTEOS10 | e034fb4d35bd02b2ea640daad79cb78c8b493a8c | [
"MIT"
] | null | null | null | bsq.py | jarethholt/polyTEOS10 | e034fb4d35bd02b2ea640daad79cb78c8b493a8c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Boussinesq equations of state.
This module provides two functions for the equation of state (EOS) of seawater
suitable for Boussinesq ocean models. In both cases, the thermodynamic
variables are absolute salinity, conservative temperature, and depth. In
comparison, the standard formulation of an EOS is in terms of absolute
salinity, in-situ temperature, and pressure.
These equations of state are given as polynomials in temperature, pressure, and
a salinity-related variable. The evaluation of polynomials and their
derivatives is implemented in the `poly` module.
"""
# Import statements
import numpy as np
import aux
from const import GRAV, SRED, TRED, ZRED, RHOBSQ, DTASBSQ, NPZFILE
# Load the relevant coefficients
with np.load(NPZFILE) as cdata:
CBSQ0, CBSQ1, IJBSQ, CSTIF0, CSTIF1, IJSTIF = [
cdata[name] for name in ('BSQ0', 'BSQ1', 'ijmaxs_BSQ1', 'STIF0',
'STIF1', 'ijmaxs_STIF1')]
# Equation of state functions
def eos_bsq0(dpth):
"""Calculate Boussinesq density reference profile.
Calculate the reference profile for Boussinesq density, i.e. the principal
component of density that depends only on depth.
Arguments:
dpth (float or array): Seawater depth in m; equivalently, seawater
reference hydrostatic pressure in dbar.
Returns:
r0 (float or array): Density in kg m-3.
r0z (float or array): Derivative of density with respect to depth, in
units of kg m-3 m-1.
"""
zet = dpth / ZRED
r0, r0z = aux.poly1d_1der(zet, CBSQ0)
return (r0, r0z)
def eos_bsq1(salt, tcon, dpth):
"""Calculate Boussinesq density anomaly.
Calculate the anomaly from the reference profile for Boussinesq density.
Arguments:
salt (float or array): Absolute salinity in g kg-1.
tcon (float or array): Conservative temperature in degrees Celsius.
dpth (float or array): Seawater depth in m; equivalently, seawater
reference hydrostatic pressure in dbar.
Returns:
r1 (float or array): Density anomaly in kg m-3.
r1s, r1t, r1z (float or array): Derivatives of the density anomaly with
respect to salinity, temperature, and depth.
"""
sig = ((salt+DTASBSQ)/SRED)**.5
tau = tcon / TRED
zet = dpth / ZRED
r1, r1s, r1t, r1z = aux.poly3d_1der(sig, tau, zet, CBSQ1, IJBSQ)
return (r1, r1s, r1t, r1z)
def eos_bsq(salt, tcon, dpth):
"""Calculate Boussinesq density.
Calculate the density and related quantities using a Boussinesq
approximation from the salinity, temperature, and pressure. In the
Boussinesq form, any term divided by the full density should use the
reference density RHOBSQ (1020 kg m-3).
The points can be given as numpy arrays as long as they are broadcastable
against each other; all outputs will have this shape and type.
Arguments:
salt (float or array): Absolute salinity in g kg-1.
tcon (float or array): Conservative temperature in degrees Celsius.
dpth (float or array): Seawater depth in m; equivalently, seawater
reference hydrostatic pressure in dbar.
Returns:
rho (float or array): Density in kg m-3.
absq (float or array): Modified thermal expansion coefficient
(-drho/dtcon) in kg m-3 K-1.
bbsq (float or array): Modified haline contraction coefficient
(drho/dsalt) in kg m-3 (g kg-1)-1.
csnd (float or array): Speed of sound in m s-1.
Examples
--------
>>> eos_bsq(30.,10.,1e3) #doctest: +NORMALIZE_WHITESPACE
(1027.4514011715235,
0.17964628133829566,
0.7655553707894517,
1500.2086843982124)
"""
# Calculate reference profile and anomaly of density
r0, r0z = eos_bsq0(dpth)
r1, r1s, r1t, r1z = eos_bsq1(salt, tcon, dpth)
# Calculate physically-relevant quantities
rho = r0 + r1
absq = -r1t/TRED
bbsq = r1s / (2 * ((salt+DTASBSQ)*SRED)**.5)
csnd = (RHOBSQ*GRAV*ZRED/(r0z + r1z))**.5
return (rho, absq, bbsq, csnd)
def eos_stif0(dpth):
"""Calculate stiffened density reference profile.
Calculate the reference profile of stiffened Boussinesq density, i.e. the
principal depth-dependent component.
Arguments:
dpth (float or array): Seawater depth in m; equivalently, seawater
reference hydrostatic pressure in dbar.
Returns:
r1 (float or array): Reference profile of density in kg m-3.
r1z (float or array): Derivative of the reference profile with respect
to depth, in units of kg m-3 m-1.
"""
zet = dpth / ZRED
r1, r1z = aux.poly1d_1der(zet, CSTIF0)
return (r1, r1z)
def eos_stif1(salt, tcon, dpth):
"""Calculate stiffened density scaling factor.
Calculate the scaling factor of the stiffened density, the multiplicative
correction to the reference profile due to salinity and temperature.
Arguments:
salt (float or array): Absolute salinity in g kg-1.
tcon (float or array): Conservative temperature in degrees Celsius.
dpth (float or array): Seawater depth in m; equivalently, seawater
reference hydrostatic pressure in dbar.
Returns:
rdot (float or array): Density scaling factor, unitless.
rdots, rdott, rdotz (float or array): Derivatives of the scaling factor
with respect to salinity, temperature, and depth.
"""
sig = ((salt+DTASBSQ)/SRED)**.5
tau = tcon / TRED
zet = dpth / ZRED
rdot, rdots, rdott, rdotz = aux.poly3d_1der(sig, tau, zet, CSTIF1, IJSTIF)
return (rdot, rdots, rdott, rdotz)
def eos_stif(salt, tcon, dpth):
"""Calculate stiffened density.
Calculate the density and related quantities using a stiffened Boussinesq
approximation from the salinity, temperature, and pressure. In the
Boussinesq form, any term divided by the full density should use the
reference density RHOBSQ (1020 kg m-3).
The points can be given as numpy arrays as long as they are broadcastable
against each other; all outputs will have this shape and type.
Arguments:
salt (float or array): Absolute salinity in g kg-1.
tcon (float or array): Conservative temperature in degrees Celsius.
dpth (float or array): Seawater depth in m; equivalently, seawater
reference hydrostatic pressure in dbar.
Returns:
rho (float or array): Density in kg m-3.
absq (float or array): Modified thermal expansion coefficient
(-drho/dtcon) in kg m-3 K-1.
bbsq (float or array): Modified haline contraction coefficient
(drho/dsalt) in kg m-3 (g kg-1)-1.
csnd (float or array): Speed of sound in m s-1.
Examples
--------
>>> eos_stif(30.,10.,1e3) #doctest: +NORMALIZE_WHITESPACE
(1027.4514038962773,
0.1796494059656094,
0.7655544988472869,
1500.2088411949183)
"""
# Calculate reference profile and scaling factor
r1, r1z = eos_stif0(dpth)
rdot, rdots, rdott, rdotz = eos_stif1(salt, tcon, dpth)
# Return all physical quantities
rho = r1 * rdot
absq = -r1/TRED * rdott
bbsq = r1*rdots / (2 * ((salt+DTASBSQ)*SRED)**.5)
csnd = (RHOBSQ*GRAV*ZRED/(rdot*r1z + r1*rdotz))**.5
return (rho, absq, bbsq, csnd)
# Additional functions
def stratification(absq, bbsq, dctdz, dsadz):
"""Calculate the Boussinesq stratification.
Calculate the square of the buoyancy frequency for a Boussinesq system,
i.e. the stratification. Here, absq and bbsq are the modified thermal
expansion and haline contraction coefficients returned by either `eos_bsq`
or `eos_stif`, which are both Boussinesq equations of state.
The points can be given as numpy arrays as long as they are broadcastable
against each other; all outputs will have this shape and type. In
particular, the expansion and contraction coefficients have to be on the
same vertical grid as the temperature and salinity gradients.
Arguments:
absq (float or array): Modified thermal expansion coefficient
(-drho/dtcon) in kg m-3 K-1.
bbsq (float or array): Modified haline contraction coefficient
(drho/dsalt) in kg m-3 (g kg-1)-1.
dctdz (float or array): Vertical gradient of the conservative
temperature in K m-1.
dsadz (float or array): Vertical gradient of the absolute salinity
in g kg-1 m-1.
Returns:
nsq (float or array): Squared buoyancy frequency in s-2.
Examples
--------
>>> stratification(.18,.77,2e-3,-5e-3)
4.0476467156862744e-05
>>> __, absq, bbsq, __ = eos_bsq(30.,10.,1e3)
>>> stratification(absq,bbsq,2e-3,-5e-3)
4.025600421032772e-05
>>> __, absq, bbsq, __ = eos_stif(30.,10.,1e3)
>>> stratification(absq,bbsq,2e-3,-5e-3)
4.0256022377087266e-05
"""
nsq = GRAV/RHOBSQ * (absq*dctdz - bbsq*dsadz)
return nsq
def potenergy_bsq0(dpth):
"""Calculate the Boussinesq potential energy reference profile.
Calculate the potential energy in the Boussinesq case corresponding to the
reference profile of density.
Arguments:
dpth (float or array): Seawater depth in m; equivalently, seawater
reference hydrostatic pressure in dbar.
Returns:
epot0 (float or array): Potential energy in J m-3.
"""
# Construct coefficients of depth integral
kmax = CBSQ0.size - 1
cep0 = np.zeros(kmax+2)
cep0[1:] = CBSQ0 / np.arange(1, kmax+2)
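    # cep0[k+1] = CBSQ0[k] / (k+1): coefficients of the antiderivative of the
    # reference density polynomial (constant of integration left at zero)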
# Evaluate polynomial
zet = dpth / ZRED
epot0 = aux.poly1d(zet, cep0) * ZRED
return epot0
def potenergy_bsq1(salt, tcon, dpth):
"""Calculate the Boussinesq potential energy anomaly.
Calculate the potential energy in the Boussinesq case corresponding to the
density anomaly.
Arguments:
salt (float or array): Absolute salinity in g kg-1.
tcon (float or array): Conservative temperature in degrees Celsius.
dpth (float or array): Seawater depth in m; equivalently, seawater
reference hydrostatic pressure in dbar.
Returns:
epot1 (float or array): Potential energy in J m-3.
"""
# Construct coefficients of the depth integral
kmax = IJBSQ.size - 1
ijep1 = np.zeros(kmax+2, dtype=int)
ijep1[1:] = IJBSQ
cep1 = np.zeros(CBSQ1.size+1)
cep1[1:] = CBSQ1
ind = 1
for k in range(1, kmax+2):
ijmax = ijep1[k]
nij = (ijmax+1)*(ijmax+2)//2
cep1[ind:ind+nij] /= k
ind += nij
# Evaluate polynomial
sig = ((salt+DTASBSQ)/SRED)**.5
tau = tcon / TRED
zet = dpth / ZRED
epot1 = aux.poly3d(sig, tau, zet, cep1, ijep1) * ZRED
return epot1
# Main script: Run doctest
if __name__ == '__main__':
import doctest
doctest.testmod()
| 34.801917 | 79 | 0.665657 | 1,483 | 10,893 | 4.861092 | 0.184086 | 0.039811 | 0.068248 | 0.009155 | 0.582328 | 0.564572 | 0.492024 | 0.458455 | 0.454987 | 0.43085 | 0 | 0.049316 | 0.247957 | 10,893 | 312 | 80 | 34.913462 | 0.830688 | 0.706142 | 0 | 0.191781 | 0 | 0 | 0.019178 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.123288 | false | 0 | 0.054795 | 0 | 0.30137 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aa22b7add06f638f3f712b06e7048abcab0ea82 | 1,578 | py | Python | projects/microphysics/experiments/precpd-limiting.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | 1 | 2021-12-14T23:43:35.000Z | 2021-12-14T23:43:35.000Z | projects/microphysics/experiments/precpd-limiting.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | 195 | 2021-09-16T05:47:18.000Z | 2022-03-31T22:03:15.000Z | projects/microphysics/experiments/precpd-limiting.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | null | null | null | import sys
import secrets
sys.path.insert(0, "../argo")
from end_to_end import EndToEndJob, load_yaml, submit_jobs # noqa: E402
group = secrets.token_hex(3)
def _get_job(
config_name: str,
exp_tag: str,
limit_negative_qc: bool = False,
limit_negative_qv: bool = False,
):
config = load_yaml(f"../train/{config_name}.yaml")
if not limit_negative_qc:
# Tensor transforms are hard to edit programmatically
# Do a simple check to error if things change
key = "cloud_water_mixing_ratio_after_precpd"
assert config["tensor_transform"][0]["source"] == key
assert config["tensor_transform"][0]["to"] == key
del config["tensor_transform"][0]
if limit_negative_qv:
key = "specific_humidity_after_precpd"
config["tensor_transform"].insert(
0, dict(source=key, to=key, transform=dict(lower=0.0))
)
return EndToEndJob(
name=f"precpd-limiting-{exp_tag}-{group}",
fv3fit_image_tag="82654b2321ac6f4dc2fdc743588ae335598982e0",
image_tag="82654b2321ac6f4dc2fdc743588ae335598982e0",
ml_config=config,
prog_config=load_yaml("../configs/default.yaml"),
)
jobs = [
_get_job("rnn-no-limiting", "rnn-control"),
_get_job("rnn", "rnn-limit-precpd-tend"),
_get_job("rnn", "rnn-limit-precpd-tend-limit-qc", limit_negative_qc=True),
_get_job(
"rnn",
"rnn-limit-precpd-tend-limit-qc-qv",
limit_negative_qc=True,
limit_negative_qv=True,
),
]
submit_jobs(jobs, "dqc-precpd-limiting")
| 28.178571 | 78 | 0.660963 | 203 | 1,578 | 4.876847 | 0.403941 | 0.091919 | 0.060606 | 0.066667 | 0.152525 | 0.09596 | 0.09596 | 0.068687 | 0.068687 | 0 | 0 | 0.054618 | 0.211027 | 1,578 | 55 | 79 | 28.690909 | 0.740562 | 0.067174 | 0 | 0 | 0 | 0 | 0.304496 | 0.213896 | 0 | 0 | 0 | 0 | 0.04878 | 1 | 0.02439 | false | 0 | 0.073171 | 0 | 0.121951 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aa465b4884d95c5ae1adb351d0771da2d7c5701 | 2,991 | py | Python | libs/member_operate.py | xujpxm/v_charge | abb1237eeca066cec435680e38ab6878a8d4ac27 | [
"MIT"
] | 3 | 2017-08-18T07:52:32.000Z | 2019-08-31T12:47:10.000Z | libs/member_operate.py | xujpxm/v_charge | abb1237eeca066cec435680e38ab6878a8d4ac27 | [
"MIT"
] | null | null | null | libs/member_operate.py | xujpxm/v_charge | abb1237eeca066cec435680e38ab6878a8d4ac27 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
import traceback
import vymgmt
from vpn.models import Member
logger = logging.getLogger('vpn')
def disable_member(username):
"""
    Disable a tenant (VPN user).
:return:
"""
try:
member = Member.objects.get(username=username)
host = member.vpn_server.host
port = member.vpn_server.port
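        # vymgmt session flow: login -> configure -> set/delete -> commit
        # -> save -> exit -> logout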
vyos = vymgmt.Router(address=host, user='vyos',
port=port)
vyos.login()
logger.info("VyOS login success~")
vyos.configure()
vyos.set(
"vpn l2tp remote-access authentication local-users username %s disable" % username)
vyos.commit()
vyos.save()
vyos.exit()
vyos.logout()
logger.info("VyOS config success~")
member.is_enabled = False
member.save()
logger.info("user %s is disabled" % username)
return True
except Exception:
logger.error(traceback.format_exc())
return False
def enable_member(member_obj):
"""
    Enable a VyOS VPN user.
:username: vyos vpn username
:return: True or False
"""
try:
member = member_obj
username = member.username
host = member.vpn_server.host
port = member.vpn_server.port
vyos = vymgmt.Router(address=host, user='vyos', port=port)
vyos.login()
vyos.configure()
vyos.delete(
'vpn l2tp remote-access authentication local-users username %s disable' % username)
vyos.commit()
vyos.save()
vyos.exit()
vyos.logout()
member.is_enabled = True
member.save()
logger.info('VPN User {0} is enabled'.format(username))
return True
except Exception:
logger.error(traceback.format_exc())
return False
def create_member(member_obj):
"""
    Provision a tenant (create the VPN user).
:return:
"""
try:
member = member_obj
username = member.username
host = member.vpn_server.host
port = member.vpn_server.port
password = member.password
address = member.static_ip
vyos = vymgmt.Router(address=host, user='vyos',
port=port, password='yourpassword')
vyos.login()
logger.info("VyOS login success~")
vyos.configure()
vyos.set(
"vpn l2tp remote-access authentication local-users username %s password %s" % (username, password))
vyos.set(
"vpn l2tp remote-access authentication local-users username %s static-ip %s" % (username, address))
vyos.commit()
logger.info("VyOS configuration commit success~")
vyos.save()
logger.info("VyOS configuration save success~")
vyos.exit()
vyos.logout()
member.is_enabled = True
member.save()
return True
except Exception:
logger.error(traceback.format_exc())
return False
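# --- Usage sketch (added, illustrative only): driving these helpers. ---
# Assumptions: a configured Django project with the vpn app installed;
# the username "alice" is hypothetical.
#
#   from libs.member_operate import create_member, disable_member, enable_member
#   from vpn.models import Member
#
#   member = Member.objects.get(username="alice")
#   if create_member(member):        # push the account to its VyOS router
#       disable_member("alice")      # later: lock the account by name
#       enable_member(member)        # ...and unlock it again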
| 28.759615 | 111 | 0.591107 | 326 | 2,991 | 5.343558 | 0.220859 | 0.040184 | 0.051665 | 0.043628 | 0.622273 | 0.622273 | 0.622273 | 0.622273 | 0.622273 | 0.597589 | 0 | 0.00289 | 0.305918 | 2,991 | 103 | 112 | 29.038835 | 0.836224 | 0.039786 | 0 | 0.679012 | 0 | 0 | 0.170349 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0.037037 | 0.061728 | 0 | 0.17284 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aa6cf205bc5a598b9f5e36809eaaf56c7b2a052 | 1,363 | py | Python | Ex105.py | Fernando-Rodrigo/Exercicios | 04fe641220f36df85a754b2944d60f245cf6cabd | [
"MIT"
] | 1 | 2022-03-14T20:49:04.000Z | 2022-03-14T20:49:04.000Z | Ex105.py | Fernando-Rodrigo/Exercicios | 04fe641220f36df85a754b2944d60f245cf6cabd | [
"MIT"
] | null | null | null | Ex105.py | Fernando-Rodrigo/Exercicios | 04fe641220f36df85a754b2944d60f245cf6cabd | [
"MIT"
] | null | null | null | """Faça um programa que tenha uma função notas() que pode receber várias notas de alunos e vai retornar e vai retornar um dicionário com as seguntes informações: -Qauntidade de notas; -A maior nota; -A menor nota; -A média da turma; -A situção(opcional(padrão=False)). Adicione também as docstrings da função. Passar várias notas por padrão sem pedir ao usuário para digitar as notas e situação."""
def notas(*notas, situacao=False):
"""
->Função de análise de notas de uma turma
:param *notas: recebe várias notas para serem analisadas
:param situação: recebe True ou False para mostrar ou não a situação da sala
:return: retorna um dicionário com as informações das notas recebidas
"""
nota = {}
media = sum(notas) / len(notas)
nota['Total'] = len(notas)
nota['MaiorNota'] = max(notas)
nota['MenorNota'] = min(notas)
nota['Média'] = media
    if situacao:
        if media < 4:
            nota['situação'] = 'Péssima'
        elif media < 5:
            nota['situação'] = 'Ruim'
        elif media < 7:
            nota['situação'] = 'Boa'
        elif media < 9:
            nota['situação'] = 'Muito Boa'
        else:
            nota['situação'] = 'Excelente'
return nota
print(notas(1, 7, 8.5, 6, 2.5, situacao=False))
print(notas(9, 7, 9.1, 8.9, 7.8, 7.9, 6, 8, situacao=True))
| 37.861111 | 398 | 0.623624 | 195 | 1,363 | 4.358974 | 0.430769 | 0.041176 | 0.028235 | 0.04 | 0.054118 | 0.054118 | 0 | 0 | 0 | 0 | 0 | 0.023762 | 0.258988 | 1,363 | 35 | 399 | 38.942857 | 0.817822 | 0.468085 | 0 | 0 | 0 | 0 | 0.144509 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0 | 0 | 0.095238 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aa76ca36128c765ecd565fb6a80d181793216aa | 15,679 | py | Python | fHDHR/config/__init__.py | CarloDiGi/fHDHR | d8e79708c957a2872d3cf22d13c19c397f84aef6 | [
"WTFPL"
] | null | null | null | fHDHR/config/__init__.py | CarloDiGi/fHDHR | d8e79708c957a2872d3cf22d13c19c397f84aef6 | [
"WTFPL"
] | null | null | null | fHDHR/config/__init__.py | CarloDiGi/fHDHR | d8e79708c957a2872d3cf22d13c19c397f84aef6 | [
"WTFPL"
] | null | null | null | import os
import sys
import random
import configparser
import pathlib
import platform
import json
import fHDHR.exceptions
from fHDHR import fHDHR_VERSION
from fHDHR.tools import isint, isfloat, is_arithmetic, is_docker
class Config():
def __init__(self, args, script_dir, fHDHR_web):
self.fHDHR_web = fHDHR_web
self.internal = {}
self.conf_default = {}
self.dict = {}
self.internal["versions"] = {}
self.config_file = args.cfg
self.iliketobreakthings = args.iliketobreakthings
self.core_setup(script_dir)
def core_setup(self, script_dir):
data_dir = pathlib.Path(script_dir).joinpath('data')
internal_plugins_dir = pathlib.Path(script_dir).joinpath('plugins')
fHDHR_web_dir = pathlib.Path(script_dir).joinpath('fHDHR_web')
www_dir = pathlib.Path(fHDHR_web_dir).joinpath('www_dir')
self.internal["paths"] = {
"script_dir": script_dir,
"data_dir": data_dir,
"plugins_dir": [internal_plugins_dir],
"cache_dir": pathlib.Path(data_dir).joinpath('cache'),
"internal_config": pathlib.Path(data_dir).joinpath('internal_config'),
"fHDHR_web_dir": fHDHR_web_dir,
"www_dir": www_dir,
"www_templates_dir": pathlib.Path(fHDHR_web_dir).joinpath('templates'),
"font": pathlib.Path(data_dir).joinpath('garamond.ttf'),
}
for conffile in os.listdir(self.internal["paths"]["internal_config"]):
conffilepath = os.path.join(self.internal["paths"]["internal_config"], conffile)
if str(conffilepath).endswith(".json"):
self.read_json_config(conffilepath)
for file_item in os.listdir(self.internal["paths"]["fHDHR_web_dir"]):
file_item_path = pathlib.Path(self.internal["paths"]["fHDHR_web_dir"]).joinpath(file_item)
if str(file_item_path).endswith("_conf.json"):
self.read_json_config(file_item_path)
self.dict["epg"]["valid_methods"] = {None: {}}
self.dict["origins"] = {}
self.dict["origins"]["valid_methods"] = {}
self.dict["streaming"]["valid_methods"] = {"direct": {}}
self.dict["plugin_web_paths"] = {}
self.load_versions()
def register_web_path(self, name, path, plugin_dict_name):
self.dict["plugin_web_paths"][name.lower()] = {
"name": name,
"namespace": name.lower(),
"path": path,
"plugin": plugin_dict_name
}
def register_valid_origin_method(self, method_item):
self.dict["origins"]["valid_methods"][method_item.lower()] = {
"name": method_item,
"namespace": method_item.lower(),
}
def register_valid_streaming_method(self, method_item, plugin_dict_name):
self.dict["streaming"]["valid_methods"][method_item.lower()] = {
"name": method_item,
"namespace": method_item.lower(),
"plugin": plugin_dict_name
}
def register_valid_epg_method(self, method_item, plugin_dict_name):
self.dict["epg"]["valid_methods"][method_item.lower()] = {
"name": method_item,
"namespace": method_item.lower(),
"plugin": plugin_dict_name
}
def register_version(self, item_name, item_version, item_type):
self.internal["versions"][item_name] = {
"name": item_name,
"version": item_version,
"type": item_type
}
def import_conf_json(self, file_item_path):
self.read_json_config(file_item_path)
def load_versions(self):
self.register_version("fHDHR", fHDHR_VERSION, "fHDHR")
self.register_version("fHDHR_web", self.fHDHR_web.fHDHR_web_VERSION, "fHDHR")
self.register_version("Python", sys.version, "env")
opersystem = platform.system()
self.register_version("Operating System", opersystem, "env")
if opersystem in ["Linux", "Darwin"]:
# Linux/Mac
if os.getuid() == 0 or os.geteuid() == 0:
print('Warning: Do not run fHDHR with root privileges.')
elif opersystem in ["Windows"]:
# Windows
if os.environ.get("USERNAME") == "Administrator":
print('Warning: Do not run fHDHR as Administrator.')
else:
print("Uncommon Operating System, use at your own risk.")
isdocker = is_docker()
self.register_version("Docker", isdocker, "env")
def user_config(self):
print("Loading Configuration File: %s" % self.config_file)
self.read_ini_config(self.config_file)
def setup_user_config(self):
current_conf = {}
config_handler = configparser.ConfigParser()
config_handler.read(self.config_file)
for each_section in config_handler.sections():
if each_section.lower() not in list(current_conf.keys()):
current_conf[each_section.lower()] = {}
for (each_key, each_val) in config_handler.items(each_section):
each_val = self.get_real_conf_value(each_key, each_val)
import_val = True
if each_section in list(self.conf_default.keys()):
if each_key in list(self.conf_default[each_section].keys()):
if not self.conf_default[each_section][each_key]["config_file"] or self.iliketobreakthings:
import_val = False
if import_val:
current_conf[each_section.lower()][each_key.lower()] = each_val
for config_section in list(self.conf_default.keys()):
if config_section not in list(current_conf.keys()):
current_conf[config_section] = {}
for config_item in list(self.conf_default[config_section].keys()):
writeval = True
if config_item in list(current_conf[config_section].keys()):
writeval = False
if writeval:
value = self.conf_default[config_section][config_item]["value"]
self.write(config_item, value, config_section)
def config_verification_plugins(self):
required_missing = {}
        # Collect required config options that are still missing a value
for config_section in list(self.conf_default.keys()):
for config_item in list(self.conf_default[config_section].keys()):
if self.conf_default[config_section][config_item]["required"]:
if not self.dict[config_section][config_item]:
if config_section not in list(required_missing.keys()):
required_missing[config_section] = []
required_missing[config_section].append(config_item)
for config_section in list(required_missing.keys()):
print("Warning! Required configuration options missing: [%s]%s" % (config_section, ", ".join(required_missing[config_section])))
if self.dict["epg"]["method"] and self.dict["epg"]["method"] not in ["None"]:
if isinstance(self.dict["epg"]["method"], str):
self.dict["epg"]["method"] = [self.dict["epg"]["method"]]
epg_methods = []
for epg_method in self.dict["epg"]["method"]:
if epg_method in list(self.dict["epg"]["valid_methods"].keys()):
epg_methods.append(epg_method)
elif epg_method in list(self.dict["origins"]["valid_methods"].keys()):
epg_methods.append(epg_method)
elif epg_method in ["origin", "origins"]:
epg_methods.extend(list(self.dict["origins"]["valid_methods"].keys()))
else:
raise fHDHR.exceptions.ConfigurationError("Invalid EPG Method. Exiting...")
self.dict["epg"]["method"] = epg_methods
if isinstance(self.dict["epg"]["method"], str):
self.dict["epg"]["method"] = [self.dict["epg"]["method"]]
if self.dict["epg"]["method"]:
self.dict["epg"]["def_method"] = self.dict["epg"]["method"][0]
else:
self.dict["epg"]["def_method"] = None
if self.dict["streaming"]["method"] not in self.dict["streaming"]["valid_methods"]:
raise fHDHR.exceptions.ConfigurationError("Invalid stream type. Exiting...")
def config_verification(self):
if not self.dict["main"]["uuid"]:
self.dict["main"]["uuid"] = ''.join(random.choice("hijklmnopqrstuvwxyz") for i in range(8))
self.write('uuid', self.dict["main"]["uuid"], 'main')
if self.dict["main"]["cache_dir"]:
if not pathlib.Path(self.dict["main"]["cache_dir"]).is_dir():
raise fHDHR.exceptions.ConfigurationError("Invalid Cache Directory. Exiting...")
self.internal["paths"]["cache_dir"] = pathlib.Path(self.dict["main"]["cache_dir"])
cache_dir = self.internal["paths"]["cache_dir"]
logs_dir = pathlib.Path(cache_dir).joinpath('logs')
self.internal["paths"]["logs_dir"] = logs_dir
if not logs_dir.is_dir():
logs_dir.mkdir()
self.dict["database"]["path"] = pathlib.Path(cache_dir).joinpath('fhdhr.db')
if not self.dict["fhdhr"]["discovery_address"] and self.dict["fhdhr"]["address"] != "0.0.0.0":
self.dict["fhdhr"]["discovery_address"] = self.dict["fhdhr"]["address"]
if not self.dict["fhdhr"]["discovery_address"] or self.dict["fhdhr"]["discovery_address"] == "0.0.0.0":
self.dict["fhdhr"]["discovery_address"] = None
def get_real_conf_value(self, key, confvalue):
if not confvalue:
confvalue = None
elif key == "xmltv_offset":
confvalue = str(confvalue)
elif str(confvalue) in ["0"]:
confvalue = 0
elif isint(confvalue):
confvalue = int(confvalue)
elif isfloat(confvalue):
confvalue = float(confvalue)
elif is_arithmetic(confvalue):
confvalue = eval(confvalue)
elif "," in confvalue:
confvalue = confvalue.split(",")
elif str(confvalue).lower() in ["none", ""]:
confvalue = None
elif str(confvalue).lower() in ["false"]:
confvalue = False
elif str(confvalue).lower() in ["true"]:
confvalue = True
return confvalue
def read_json_config(self, conffilepath):
with open(conffilepath, 'r') as jsonconf:
confimport = json.load(jsonconf)
for section in list(confimport.keys()):
if section not in self.dict.keys():
self.dict[section] = {}
if section not in self.conf_default.keys():
self.conf_default[section] = {}
for key in list(confimport[section].keys()):
if key not in list(self.conf_default[section].keys()):
self.conf_default[section][key] = {}
confvalue = self.get_real_conf_value(key, confimport[section][key]["value"])
self.dict[section][key] = confvalue
self.conf_default[section][key]["value"] = confvalue
for config_option in ["config_web_hidden", "config_file", "config_web", "required"]:
if config_option not in list(confimport[section][key].keys()):
config_option_value = False
else:
config_option_value = confimport[section][key][config_option]
if str(config_option_value).lower() in ["none"]:
config_option_value = None
elif str(config_option_value).lower() in ["false"]:
config_option_value = False
elif str(config_option_value).lower() in ["true"]:
config_option_value = True
self.conf_default[section][key][config_option] = config_option_value
if "valid_options" not in list(confimport[section][key].keys()):
config_option_value = None
else:
config_option_value = confimport[section][key]["valid_options"]
if "," in config_option_value:
config_option_value = config_option_value.split(",")
elif config_option_value in ["integer"]:
config_option_value = config_option_value
else:
config_option_value = [config_option_value]
self.conf_default[section][key]["valid_options"] = config_option_value
if "description" not in list(confimport[section][key].keys()):
config_option_value = None
else:
config_option_value = confimport[section][key]["description"]
self.conf_default[section][key]["description"] = config_option_value
def read_ini_config(self, conffilepath):
config_handler = configparser.ConfigParser()
config_handler.read(conffilepath)
for each_section in config_handler.sections():
if each_section.lower() not in list(self.dict.keys()):
self.dict[each_section.lower()] = {}
for (each_key, each_val) in config_handler.items(each_section):
each_val = self.get_real_conf_value(each_key, each_val)
import_val = True
if each_section in list(self.conf_default.keys()):
if each_key in list(self.conf_default[each_section].keys()):
if not self.conf_default[each_section][each_key]["config_file"] or self.iliketobreakthings:
import_val = False
if import_val:
self.dict[each_section.lower()][each_key.lower()] = each_val
def write(self, key, value, section):
if not value:
value = None
elif key == "xmltv_offset":
value = str(value)
elif str(value) in ["0"]:
value = 0
elif isint(value):
value = int(value)
elif isfloat(value):
value = float(value)
elif is_arithmetic(value):
value = eval(value)
        elif isinstance(value, list):
            value = ",".join(value)
elif str(value).lower() in ["none", ""]:
value = None
elif str(value).lower() in ["false"]:
value = False
elif str(value).lower() in ["true"]:
value = True
self.dict[section][key] = value
config_handler = configparser.ConfigParser()
config_handler.read(self.config_file)
if not config_handler.has_section(section):
config_handler.add_section(section)
config_handler.set(section, key, str(value))
with open(self.config_file, 'w') as config_file:
config_handler.write(config_file)
def __getattr__(self, name):
''' will only get called for undefined attributes '''
if name in list(self.dict.keys()):
return self.dict[name]
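# --- Usage sketch (added, illustrative only): how get_real_conf_value
# coerces raw INI/JSON strings. `config` is a hypothetical Config instance.
#
#   config.get_real_conf_value("port", "5004")       # -> 5004 (int)
#   config.get_real_conf_value("epg", "locast,zap")  # -> ['locast', 'zap']
#   config.get_real_conf_value("enabled", "true")    # -> True
#   config.get_real_conf_value("unused", "")         # -> None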
| 43.674095 | 140 | 0.570827 | 1,729 | 15,679 | 4.947947 | 0.107577 | 0.049562 | 0.045704 | 0.023846 | 0.535359 | 0.377674 | 0.321449 | 0.243366 | 0.22782 | 0.208416 | 0 | 0.001472 | 0.306716 | 15,679 | 358 | 141 | 43.796089 | 0.785557 | 0.006059 | 0 | 0.214533 | 0 | 0 | 0.111332 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062284 | false | 0 | 0.093426 | 0 | 0.16609 | 0.017301 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aa7a26cf52e0251bce7779f2f48e1a5c15fabf2 | 415 | py | Python | eth_utils/typing/misc.py | Arkalius/eth-utils | 9b574adc89bb3fb56771ef7a3910632e3577c834 | [
"MIT"
] | 2 | 2018-04-27T06:59:10.000Z | 2020-04-09T04:01:46.000Z | eth_utils/typing/misc.py | Arkalius/eth-utils | 9b574adc89bb3fb56771ef7a3910632e3577c834 | [
"MIT"
] | null | null | null | eth_utils/typing/misc.py | Arkalius/eth-utils | 9b574adc89bb3fb56771ef7a3910632e3577c834 | [
"MIT"
] | null | null | null | from typing import (
NewType,
TypeVar,
Union,
)
from eth_typing import Address
HexAddress = NewType('HexAddress', str) # for hex encoded addresses
ChecksumAddress = NewType('ChecksumAddress', HexAddress) # for hex addresses with checksums
AnyAddress = TypeVar('AnyAddress', Address, HexAddress, ChecksumAddress)
HexStr = NewType('HexStr', str)
Primitives = Union[bytes, int, bool]
T = TypeVar('T')
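# --- Usage sketch (added, illustrative only): the aliases at runtime. ---
if __name__ == '__main__':
    # NewType aliases are plain str at runtime; the zero address below is a
    # hypothetical placeholder, not a real account.
    zero: HexAddress = HexAddress('0x' + '00' * 20)
    assert isinstance(zero, str)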
| 25.9375 | 92 | 0.737349 | 46 | 415 | 6.630435 | 0.521739 | 0.078689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.159036 | 415 | 15 | 93 | 27.666667 | 0.873926 | 0.139759 | 0 | 0 | 0 | 0 | 0.118644 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aaa0e3998c581f6b39f4c1d29454a3e33b3b65f | 641 | py | Python | exercicio44.py | profnssorg/henriqueJoner1 | be5ddd3be50716a9d0c99bc74a434b4fc396e0a0 | [
"MIT"
] | null | null | null | exercicio44.py | profnssorg/henriqueJoner1 | be5ddd3be50716a9d0c99bc74a434b4fc396e0a0 | [
"MIT"
] | null | null | null | exercicio44.py | profnssorg/henriqueJoner1 | be5ddd3be50716a9d0c99bc74a434b4fc396e0a0 | [
"MIT"
] | null | null | null | """
Descrição: Este programa calcula o reajuste correto de acordo com o salário do funcionário
Autor:Henrique Joner
Versão:0.0.1
Data:25/11/2018
"""
# Initialise variables
salario = 0
aumento = 0
novosalario = 0
# Data input
salario = float(input("So that we can check your adjustment, please enter your salary: "))
aumento = 15
# Data processing
if salario <= 1250:
    novosalario = salario * (1 + aumento/100)
else:
    aumento = 10
    novosalario = salario * (1 + aumento/100)
# Data output
print("Your new salary will be R$ %5.2f; your adjustment was %d%%!" % (novosalario, aumento))
| 17.805556 | 96 | 0.703588 | 94 | 641 | 4.797872 | 0.56383 | 0.046563 | 0.05765 | 0.115299 | 0.128603 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069767 | 0.195008 | 641 | 35 | 97 | 18.314286 | 0.804264 | 0.341654 | 0 | 0.181818 | 0 | 0 | 0.314634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aaa793629f0e8ef522a0ea519801415d767b2bc | 2,332 | py | Python | app/permission.py | Stanford-PERTS/neptune | 20b945adf7b62e67db60be3cc451ffb16113fe33 | [
"CC0-1.0"
] | null | null | null | app/permission.py | Stanford-PERTS/neptune | 20b945adf7b62e67db60be3cc451ffb16113fe33 | [
"CC0-1.0"
] | null | null | null | app/permission.py | Stanford-PERTS/neptune | 20b945adf7b62e67db60be3cc451ffb16113fe33 | [
"CC0-1.0"
] | null | null | null | """What relationships (ownership, association...) do users have to things?"""
import logging
from model import DatastoreModel
def owns(user, id_or_entity):
"""Does this user own the object in question?"""
# Supers own everything.
if user.super_admin:
return True
if owns_program(user, id_or_entity):
return True
# Convert to id.
uid = (str(id_or_entity) if isinstance(id_or_entity, basestring)
else id_or_entity.uid)
kind = DatastoreModel.get_kind(uid)
# Everyone owns public data.
owned_orgs = user.owned_organizations + ['Organization_public']
if kind == 'Organization':
result = uid in owned_orgs
elif kind == 'Project':
project = DatastoreModel.get_by_id(uid)
user_owns_program = project.program_label in user.owned_programs
user_owns_org = project.organization_id in owned_orgs
user_owns_project = project.uid in user.owned_projects
result = user_owns_program or user_owns_org or user_owns_project
elif kind == 'ProjectCohort':
# Same logic as project
pc = DatastoreModel.get_by_id(uid)
user_owns_program = pc.program_label in user.owned_programs
user_owns_org = pc.organization_id in owned_orgs
result = user_owns_program or user_owns_org
elif kind == 'Survey':
# Same logic as project
survey = DatastoreModel.get_by_id(uid)
user_owns_program = survey.program_label in user.owned_programs
user_owns_org = survey.organization_id in owned_orgs
result = user_owns_program or user_owns_org
elif kind == 'Task' or kind == 'TaskReminder':
# same ownership as parent
result = owns(user, DatastoreModel.get_parent_uid(uid))
elif kind == 'User':
result = uid == user.uid # no slavery!
elif kind == 'DataTable':
result = uid in user.owned_data_tables
elif kind == 'DataRequest':
result = uid in user.owned_data_requests
elif kind == 'Notification':
# same ownership as parent
result = owns(user, DatastoreModel.get_parent_uid(uid))
else:
raise NotImplementedError("Ownership does not apply to " + uid)
return result
def owns_program(user, program_label):
return user.super_admin or program_label in user.owned_programs
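# --- Usage sketch (added, illustrative only): a typical gate in a handler.
# Assumptions: `current_user` comes from the surrounding app's auth layer,
# and the ids below are hypothetical.
#
#   if not owns(current_user, 'Project_demo123'):
#       raise PermissionError('forbidden')
#   if owns_program(current_user, 'demo-program'):
#       ...  # program owners see everything in their program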
| 34.80597 | 77 | 0.685678 | 309 | 2,332 | 4.925566 | 0.252427 | 0.073587 | 0.050591 | 0.047306 | 0.42247 | 0.406045 | 0.354139 | 0.354139 | 0.254928 | 0.172142 | 0 | 0 | 0.237564 | 2,332 | 66 | 78 | 35.333333 | 0.856018 | 0.122642 | 0 | 0.136364 | 0 | 0 | 0.067554 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.045455 | 0.022727 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aaac7f7cf6d77a715e51dcfe9770d2c396acd7a | 4,005 | py | Python | misago/users/admin.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | 1 | 2017-07-25T03:04:36.000Z | 2017-07-25T03:04:36.000Z | misago/users/admin.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | null | null | null | misago/users/admin.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib import admin as djadmin
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from .djangoadmin import UserAdminModel
from .views.admin.bans import BansList, DeleteBan, EditBan, NewBan
from .views.admin.ranks import (
DefaultRank, DeleteRank, EditRank, MoveDownRank, MoveUpRank, NewRank, RanksList, RankUsers)
from .views.admin.users import (
DeleteAccountStep, DeletePostsStep, DeleteThreadsStep, EditUser, NewUser, UsersList)
djadmin.site.register(model_or_iterable=get_user_model(), admin_class=UserAdminModel)
class MisagoAdminExtension(object):
def register_urlpatterns(self, urlpatterns):
# Users section
urlpatterns.namespace(r'^users/', 'users')
# Accounts
urlpatterns.namespace(r'^accounts/', 'accounts', 'users')
urlpatterns.patterns(
'users:accounts',
url(r'^$', UsersList.as_view(), name='index'),
url(r'^(?P<page>\d+)/$', UsersList.as_view(), name='index'),
url(r'^new/$', NewUser.as_view(), name='new'),
url(r'^edit/(?P<pk>\d+)/$', EditUser.as_view(), name='edit'),
url(
r'^delete-threads/(?P<pk>\d+)/$',
DeleteThreadsStep.as_view(),
name='delete-threads'
),
url(r'^delete-posts/(?P<pk>\d+)/$', DeletePostsStep.as_view(), name='delete-posts'),
url(
r'^delete-account/(?P<pk>\d+)/$',
DeleteAccountStep.as_view(),
name='delete-account'
),
)
# Ranks
urlpatterns.namespace(r'^ranks/', 'ranks', 'users')
urlpatterns.patterns(
'users:ranks',
url(r'^$', RanksList.as_view(), name='index'),
url(r'^new/$', NewRank.as_view(), name='new'),
url(r'^edit/(?P<pk>\d+)/$', EditRank.as_view(), name='edit'),
url(r'^default/(?P<pk>\d+)/$', DefaultRank.as_view(), name='default'),
url(r'^move/down/(?P<pk>\d+)/$', MoveDownRank.as_view(), name='down'),
url(r'^move/up/(?P<pk>\d+)/$', MoveUpRank.as_view(), name='up'),
url(r'^users/(?P<pk>\d+)/$', RankUsers.as_view(), name='users'),
url(r'^delete/(?P<pk>\d+)/$', DeleteRank.as_view(), name='delete'),
)
# Bans
urlpatterns.namespace(r'^bans/', 'bans', 'users')
urlpatterns.patterns(
'users:bans',
url(r'^$', BansList.as_view(), name='index'),
url(r'^(?P<page>\d+)/$', BansList.as_view(), name='index'),
url(r'^new/$', NewBan.as_view(), name='new'),
url(r'^edit/(?P<pk>\d+)/$', EditBan.as_view(), name='edit'),
url(r'^delete/(?P<pk>\d+)/$', DeleteBan.as_view(), name='delete'),
)
def register_navigation_nodes(self, site):
site.add_node(
name=_("Users"),
icon='fa fa-users',
parent='misago:admin',
after='misago:admin:index',
namespace='misago:admin:users',
link='misago:admin:users:accounts:index',
)
site.add_node(
name=_("User Accounts"),
icon='fa fa-users',
parent='misago:admin:users',
namespace='misago:admin:users:accounts',
link='misago:admin:users:accounts:index',
)
site.add_node(
name=_("Ranks"),
icon='fa fa-graduation-cap',
parent='misago:admin:users',
after='misago:admin:users:accounts:index',
namespace='misago:admin:users:ranks',
link='misago:admin:users:ranks:index',
)
site.add_node(
name=_("Bans"),
icon='fa fa-lock',
parent='misago:admin:users',
after='misago:admin:users:ranks:index',
namespace='misago:admin:users:bans',
link='misago:admin:users:bans:index',
)
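# --- Usage sketch (added, illustrative only): resolving a name registered
# above. Assumption: Misago wires these urlpatterns into Django's resolver,
# so the usual reverse() lookup applies; the resulting path depends on the
# site's admin prefix.
#
#   from django.urls import reverse
#   url = reverse('misago:admin:users:accounts:index')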
| 38.883495 | 96 | 0.55206 | 449 | 4,005 | 4.835189 | 0.193764 | 0.036849 | 0.092123 | 0.034546 | 0.320129 | 0.2538 | 0.237218 | 0.141409 | 0.101796 | 0.078766 | 0 | 0 | 0.266667 | 4,005 | 102 | 97 | 39.264706 | 0.73919 | 0.00824 | 0 | 0.209302 | 0 | 0 | 0.249307 | 0.1152 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.093023 | 0 | 0.127907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aae6ac5b39f99950764ab16105858ed9000067a | 13,268 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_vm_vm_drs_rule.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_vm_vm_drs_rule.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_vm_vm_drs_rule.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_vm_vm_drs_rule
short_description: Configure VMware DRS Affinity rule for virtual machine in given cluster
description:
- This module can be used to configure VMware DRS Affinity rule for virtual machine in given cluster.
version_added: 2.5
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
cluster_name:
description:
- Desired cluster name where virtual machines are present for the DRS rule.
required: True
type: str
vms:
description:
- List of virtual machines name for which DRS rule needs to be applied.
- Required if C(state) is set to C(present).
type: list
drs_rule_name:
description:
- The name of the DRS rule to manage.
required: True
type: str
enabled:
description:
- If set to C(True), the DRS rule will be enabled.
- Effective only if C(state) is set to C(present).
default: False
type: bool
mandatory:
description:
- If set to C(True), the DRS rule will be mandatory.
- Effective only if C(state) is set to C(present).
default: False
type: bool
affinity_rule:
description:
- If set to C(True), the DRS rule will be an Affinity rule.
- If set to C(False), the DRS rule will be an Anti-Affinity rule.
- Effective only if C(state) is set to C(present).
default: True
type: bool
state:
description:
- If set to C(present), then the DRS rule is created if not present.
    - If set to C(present) and the DRS rule already exists, it is updated to the given configuration.
- If set to C(absent), then the DRS rule is deleted if present.
required: False
default: present
choices: [ present, absent ]
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Create DRS Affinity Rule for VM-VM
vmware_vm_vm_drs_rule:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
cluster_name: "{{ cluster_name }}"
validate_certs: no
vms:
- vm1
- vm2
drs_rule_name: vm1-vm2-affinity-rule-001
enabled: True
mandatory: True
affinity_rule: True
delegate_to: localhost
- name: Create DRS Anti-Affinity Rule for VM-VM
vmware_vm_vm_drs_rule:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
cluster_name: "{{ cluster_name }}"
validate_certs: no
enabled: True
vms:
- vm1
- vm2
drs_rule_name: vm1-vm2-affinity-rule-001
mandatory: True
affinity_rule: False
delegate_to: localhost
- name: Delete DRS Affinity Rule for VM-VM
vmware_vm_vm_drs_rule:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
cluster_name: "{{ cluster_name }}"
validate_certs: no
drs_rule_name: vm1-vm2-affinity-rule-001
state: absent
delegate_to: localhost
'''
RETURN = r'''
result:
description: metadata about DRS VM and VM rule
returned: when state is present
type: dict
sample: {
"rule_enabled": false,
"rule_key": 20,
"rule_mandatory": true,
"rule_name": "drs_rule_0014",
"rule_uuid": "525f3bc0-253f-825a-418e-2ec93bffc9ae",
"rule_vms": [
"VM_65",
"VM_146"
]
}
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import (PyVmomi, vmware_argument_spec, wait_for_task,
find_vm_by_id, find_cluster_by_name)
class VmwareDrs(PyVmomi):
def __init__(self, module):
super(VmwareDrs, self).__init__(module)
self.vm_list = module.params['vms']
self.cluster_name = module.params['cluster_name']
self.rule_name = module.params['drs_rule_name']
self.enabled = module.params['enabled']
self.mandatory = module.params['mandatory']
self.affinity_rule = module.params['affinity_rule']
self.state = module.params['state']
# Sanity check for cluster
self.cluster_obj = find_cluster_by_name(content=self.content,
cluster_name=self.cluster_name)
if self.cluster_obj is None:
self.module.fail_json(msg="Failed to find the cluster %s" % self.cluster_name)
# Sanity check for virtual machines
self.vm_obj_list = []
if self.state == 'present':
# Get list of VMs only if state is present
self.vm_obj_list = self.get_all_vms_info()
# Getter
def get_all_vms_info(self, vms_list=None):
"""
Get all VM objects using name from given cluster
Args:
vms_list: List of VM names
Returns: List of VM managed objects
"""
vm_obj_list = []
if vms_list is None:
vms_list = self.vm_list
for vm_name in vms_list:
vm_obj = find_vm_by_id(content=self.content, vm_id=vm_name,
vm_id_type='vm_name', cluster=self.cluster_obj)
if vm_obj is None:
self.module.fail_json(msg="Failed to find the virtual machine %s "
"in given cluster %s" % (vm_name,
self.cluster_name))
vm_obj_list.append(vm_obj)
return vm_obj_list
def get_rule_key_by_name(self, cluster_obj=None, rule_name=None):
"""
Get a specific DRS rule key by name
Args:
rule_name: Name of rule
cluster_obj: Cluster managed object
Returns: Rule Object if found or None
"""
if cluster_obj is None:
cluster_obj = self.cluster_obj
if rule_name:
rules_list = [rule for rule in cluster_obj.configuration.rule if rule.name == rule_name]
if rules_list:
return rules_list[0]
# No rule found
return None
@staticmethod
def normalize_rule_spec(rule_obj=None):
"""
Return human readable rule spec
Args:
rule_obj: Rule managed object
Returns: Dictionary with Rule info
"""
if rule_obj is None:
return {}
return dict(rule_key=rule_obj.key,
rule_enabled=rule_obj.enabled,
rule_name=rule_obj.name,
rule_mandatory=rule_obj.mandatory,
rule_uuid=rule_obj.ruleUuid,
rule_vms=[vm.name for vm in rule_obj.vm],
rule_affinity=True if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) else False,
)
# Create
def create(self):
"""
Create a DRS rule if rule does not exist
"""
rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
if rule_obj is not None:
existing_rule = self.normalize_rule_spec(rule_obj=rule_obj)
if ((sorted(existing_rule['rule_vms']) == sorted(self.vm_list)) and
(existing_rule['rule_enabled'] == self.enabled) and
(existing_rule['rule_mandatory'] == self.mandatory) and
(existing_rule['rule_affinity'] == self.affinity_rule)):
self.module.exit_json(changed=False, result=existing_rule, msg="Rule already exists with the same configuration")
else:
changed, result = self.update_rule_spec(rule_obj)
return changed, result
else:
changed, result = self.create_rule_spec()
return changed, result
def create_rule_spec(self):
"""
Create DRS rule
"""
changed = False
if self.affinity_rule:
rule = vim.cluster.AffinityRuleSpec()
else:
rule = vim.cluster.AntiAffinityRuleSpec()
rule.vm = self.vm_obj_list
rule.enabled = self.enabled
rule.mandatory = self.mandatory
rule.name = self.rule_name
rule_spec = vim.cluster.RuleSpec(info=rule, operation='add')
config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
try:
task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
changed, result = wait_for_task(task)
except vmodl.fault.InvalidRequest as e:
result = to_native(e.msg)
except Exception as e:
result = to_native(e)
if changed:
rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
result = self.normalize_rule_spec(rule_obj)
return changed, result
def update_rule_spec(self, rule_obj=None):
"""
Update DRS rule
"""
changed = False
rule_obj.vm = self.vm_obj_list
if (rule_obj.mandatory != self.mandatory):
rule_obj.mandatory = self.mandatory
if (rule_obj.enabled != self.enabled):
rule_obj.enabled = self.enabled
rule_spec = vim.cluster.RuleSpec(info=rule_obj, operation='edit')
config_spec = vim.cluster.ConfigSpec(rulesSpec=[rule_spec])
try:
task = self.cluster_obj.ReconfigureCluster_Task(config_spec, modify=True)
changed, result = wait_for_task(task)
except vmodl.fault.InvalidRequest as e:
result = to_native(e.msg)
except Exception as e:
result = to_native(e)
if changed:
rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
result = self.normalize_rule_spec(rule_obj)
return changed, result
# Delete
def delete(self, rule_name=None):
"""
Delete DRS rule using name
"""
changed = False
if rule_name is None:
rule_name = self.rule_name
rule = self.get_rule_key_by_name(rule_name=rule_name)
if rule is not None:
rule_key = int(rule.key)
rule_spec = vim.cluster.RuleSpec(removeKey=rule_key, operation='remove')
config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
try:
task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
changed, result = wait_for_task(task)
except vmodl.fault.InvalidRequest as e:
result = to_native(e.msg)
except Exception as e:
result = to_native(e)
else:
result = 'No rule named %s exists' % self.rule_name
return changed, result
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
vms=dict(type='list'),
cluster_name=dict(type='str', required=True),
drs_rule_name=dict(type='str', required=True),
enabled=dict(type='bool', default=False),
mandatory=dict(type='bool', default=False),
affinity_rule=dict(type='bool', default=True),
)
)
required_if = [
['state', 'present', ['vms']]
]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
results = dict(failed=False, changed=False)
state = module.params['state']
vm_drs = VmwareDrs(module)
if state == 'present':
# Add Rule
if module.check_mode:
results['changed'] = True
module.exit_json(**results)
changed, result = vm_drs.create()
if changed:
results['changed'] = changed
else:
results['failed'] = True
results['msg'] = "Failed to create DRS rule %s" % vm_drs.rule_name
results['result'] = result
elif state == 'absent':
# Delete Rule
if module.check_mode:
results['changed'] = True
module.exit_json(**results)
changed, result = vm_drs.delete()
if changed:
results['changed'] = changed
results['msg'] = "DRS rule %s deleted successfully." % vm_drs.rule_name
else:
if "No rule named" in result:
results['msg'] = result
module.exit_json(**results)
results['failed'] = True
results['msg'] = "Failed to delete DRS rule %s" % vm_drs.rule_name
results['result'] = result
if results['changed']:
module.exit_json(**results)
if results['failed']:
module.fail_json(**results)
if __name__ == '__main__':
main()
| 32.440098 | 129 | 0.603784 | 1,646 | 13,268 | 4.662211 | 0.156136 | 0.029189 | 0.0086 | 0.007297 | 0.381809 | 0.33607 | 0.310529 | 0.288116 | 0.272348 | 0.272348 | 0 | 0.007124 | 0.301703 | 13,268 | 408 | 130 | 32.519608 | 0.821155 | 0.06218 | 0 | 0.378981 | 0 | 0.003185 | 0.32841 | 0.019038 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028662 | false | 0.012739 | 0.019108 | 0 | 0.082803 | 0.003185 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ab093a60e783c2f660d81528a6164f1470408b4 | 324 | py | Python | pyspin/__init__.py | crunchex/py-spin | 3a17c85596d0f5a25c751878b4d42f5d52437f5b | [
"MIT"
] | 196 | 2015-12-15T15:50:46.000Z | 2022-03-12T20:19:16.000Z | pyspin/__init__.py | crunchex/py-spin | 3a17c85596d0f5a25c751878b4d42f5d52437f5b | [
"MIT"
] | 19 | 2015-12-13T13:52:41.000Z | 2021-08-19T00:42:43.000Z | pyspin/__init__.py | crunchex/py-spin | 3a17c85596d0f5a25c751878b4d42f5d52437f5b | [
"MIT"
] | 17 | 2015-12-16T17:56:58.000Z | 2021-03-31T08:02:23.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pyspin
~~~~~~~
Little terminal spinner lib.
:copyright: (c) 2015 by lord63.
:license: MIT, see LICENSE for more details.
"""
__title__ = "pyspin"
__version__ = '1.1.1'
__author__ = "lord63"
__license__ = "MIT"
__copyright__ = "Copyright 2015 lord63"
| 17.052632 | 48 | 0.623457 | 38 | 324 | 4.789474 | 0.710526 | 0.142857 | 0.175824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070313 | 0.209877 | 324 | 18 | 49 | 18 | 0.640625 | 0.509259 | 0 | 0 | 0 | 0 | 0.315385 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ab2f00f716b8988c807ade55d42aa5beb78f798 | 3,104 | py | Python | ForgeWiki/forgewiki/tests/test_models.py | shalithasuranga/allura | 4f7fba13415954d07f602a051ec697329dd3706b | [
"Apache-2.0"
] | 1 | 2019-03-17T04:16:15.000Z | 2019-03-17T04:16:15.000Z | ForgeWiki/forgewiki/tests/test_models.py | DalavanCloud/allura | a25329caed9e6d136a1004c33372e0632a16e352 | [
"Apache-2.0"
] | null | null | null | ForgeWiki/forgewiki/tests/test_models.py | DalavanCloud/allura | a25329caed9e6d136a1004c33372e0632a16e352 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pylons import tmpl_context as c
from ming.orm import session
from allura.tests import TestController
from allura.tests import decorators as td
from alluratest.controller import setup_global_objects
from allura import model as M
from allura.lib import helpers as h
from forgewiki.model import Page
class TestPageSnapshots(TestController):
@td.with_wiki
def test_version_race(self):
# threads must not throw DuplicateKeyError
# details https://sourceforge.net/p/allura/tickets/7647/
import time
import random
from threading import Thread, Lock
page = Page.upsert('test-page')
page.commit()
lock = Lock()
def run(n):
setup_global_objects()
for i in range(10):
page = Page.query.get(title='test-page')
page.text = 'Test Page %s.%s' % (n, i)
time.sleep(random.random())
# tests use mim (mongo-in-memory), which isn't thread-safe
lock.acquire()
try:
page.commit()
finally:
lock.release()
t1 = Thread(target=lambda: run(1))
t2 = Thread(target=lambda: run(2))
t1.start()
t2.start()
t1.join()
t2.join()
page = Page.query.get(title='test-page')
# 10 changes by each thread + initial upsert
assert page.history().count() == 21, page.history().count()
class TestPage(TestController):
@td.with_wiki
def test_authors(self):
user = M.User.by_username('test-user')
admin = M.User.by_username('test-admin')
with h.push_config(c, user=admin):
page = Page.upsert('test-admin')
page.text = 'admin'
page.commit()
with h.push_config(c, user=user):
page.text = 'user'
page.commit()
authors = page.authors()
assert len(authors) == 2
assert user in authors
assert admin in authors
user.disabled = True
session(user).flush(user)
authors = page.authors()
assert len(authors) == 1
assert user not in authors
assert admin in authors
| 32 | 74 | 0.611469 | 393 | 3,104 | 4.793893 | 0.43257 | 0.031847 | 0.0138 | 0.016985 | 0.171975 | 0.151805 | 0.030786 | 0 | 0 | 0 | 0 | 0.01108 | 0.302191 | 3,104 | 96 | 75 | 32.333333 | 0.858726 | 0.330541 | 0 | 0.206897 | 0 | 0 | 0.039005 | 0 | 0 | 0 | 0 | 0 | 0.12069 | 1 | 0.051724 | false | 0 | 0.189655 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ab36b57eaee848ac634baf04f49df5552770d72 | 6,251 | py | Python | lldb/test/API/functionalities/thread/step_out/TestThreadStepOut.py | Machiry/checkedc-clang | ab9360d8be0a737cb5e09051f3a0051adc4b3e47 | [
"BSD-Source-Code"
] | 250 | 2019-05-07T12:56:44.000Z | 2022-03-10T15:52:06.000Z | lldb/test/API/functionalities/thread/step_out/TestThreadStepOut.py | procedural/checkedc_binaries_ubuntu_16_04_from_12_Oct_2021 | ad4e8b01121fbfb40d81ee798480add7dc93f0bf | [
"BSD-Source-Code"
] | 410 | 2019-06-06T20:52:32.000Z | 2022-01-18T14:21:48.000Z | lldb/test/API/functionalities/thread/step_out/TestThreadStepOut.py | procedural/checkedc_binaries_ubuntu_16_04_from_12_Oct_2021 | ad4e8b01121fbfb40d81ee798480add7dc93f0bf | [
"BSD-Source-Code"
] | 50 | 2019-05-10T21:12:24.000Z | 2022-01-21T06:39:47.000Z | """
Test stepping out from a function in a multi-threaded program.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ThreadStepOutTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
# Test occasionally times out on the Linux build bot
@skipIfLinux
@expectedFailureAll(
oslist=["linux"],
bugnumber="llvm.org/pr23477 Test occasionally times out on the Linux build bot")
@expectedFailureAll(
oslist=["freebsd"],
bugnumber="llvm.org/pr18066 inferior does not exit")
@skipIfWindows # This test will hang on windows llvm.org/pr21753
@expectedFailureAll(oslist=["windows"])
@expectedFailureNetBSD
def test_step_single_thread(self):
"""Test thread step out on one thread via command interpreter. """
self.build(dictionary=self.getBuildFlags())
self.step_out_test(self.step_out_single_thread_with_cmd)
# Test occasionally times out on the Linux build bot
@skipIfLinux
@expectedFailureAll(
oslist=["linux"],
bugnumber="llvm.org/pr23477 Test occasionally times out on the Linux build bot")
@expectedFailureAll(
oslist=["freebsd"],
bugnumber="llvm.org/pr19347 2nd thread stops at breakpoint")
@skipIfWindows # This test will hang on windows llvm.org/pr21753
@expectedFailureAll(oslist=["windows"])
@expectedFailureAll(oslist=["watchos"], archs=['armv7k'], bugnumber="rdar://problem/34674488") # stop reason is trace when it should be step-out
@expectedFailureNetBSD
def test_step_all_threads(self):
"""Test thread step out on all threads via command interpreter. """
self.build(dictionary=self.getBuildFlags())
self.step_out_test(self.step_out_all_threads_with_cmd)
# Test occasionally times out on the Linux build bot
@skipIfLinux
@expectedFailureAll(
oslist=["linux"],
bugnumber="llvm.org/pr23477 Test occasionally times out on the Linux build bot")
@expectedFailureAll(
oslist=["freebsd"],
bugnumber="llvm.org/pr19347 2nd thread stops at breakpoint")
@skipIfWindows # This test will hang on windows llvm.org/pr21753
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24681")
@expectedFailureNetBSD
def test_python(self):
"""Test thread step out on one thread via Python API (dwarf)."""
self.build(dictionary=self.getBuildFlags())
self.step_out_test(self.step_out_with_python)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number for our breakpoint.
self.bkpt_string = '// Set breakpoint here'
self.breakpoint = line_number('main.cpp', self.bkpt_string)
self.step_out_destination = line_number(
'main.cpp', '// Expect to stop here after step-out.')
def step_out_single_thread_with_cmd(self):
self.step_out_with_cmd("this-thread")
self.expect(
"thread backtrace all",
"Thread location after step out is correct",
substrs=[
"main.cpp:%d" %
self.step_out_destination,
"main.cpp:%d" %
self.breakpoint])
def step_out_all_threads_with_cmd(self):
self.step_out_with_cmd("all-threads")
self.expect(
"thread backtrace all",
"Thread location after step out is correct",
substrs=[
"main.cpp:%d" %
self.step_out_destination])
def step_out_with_cmd(self, run_mode):
self.runCmd("thread select %d" % self.step_out_thread.GetIndexID())
self.runCmd("thread step-out -m %s" % run_mode)
self.expect("process status", "Expected stop reason to be step-out",
substrs=["stop reason = step out"])
self.expect(
"thread list",
"Selected thread did not change during step-out",
substrs=[
"* thread #%d" %
self.step_out_thread.GetIndexID()])
def step_out_with_python(self):
self.step_out_thread.StepOut()
reason = self.step_out_thread.GetStopReason()
self.assertEqual(
lldb.eStopReasonPlanComplete,
reason,
"Expected thread stop reason 'plancomplete', but got '%s'" %
lldbutil.stop_reason_to_str(reason))
# Verify location after stepping out
frame = self.step_out_thread.GetFrameAtIndex(0)
desc = lldbutil.get_description(frame.GetLineEntry())
expect = "main.cpp:%d" % self.step_out_destination
self.assertTrue(
expect in desc, "Expected %s but thread stopped at %s" %
(expect, desc))
def step_out_test(self, step_out_func):
"""Test single thread step out of a function."""
(self.inferior_target, self.inferior_process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
self, self.bkpt_string, lldb.SBFileSpec('main.cpp'), only_one_thread = False)
# We hit the breakpoint on at least one thread. If we hit it on both threads
# simultaneously, we can try the step out. Otherwise, suspend the thread
# that hit the breakpoint, and continue till the second thread hits
# the breakpoint:
(breakpoint_threads, other_threads) = ([], [])
lldbutil.sort_stopped_threads(self.inferior_process,
breakpoint_threads=breakpoint_threads,
other_threads=other_threads)
if len(breakpoint_threads) == 1:
success = thread.Suspend()
self.assertTrue(success, "Couldn't suspend a thread")
bkpt_threads = lldbutil.continue_to_breakpoint(self.inferior_process,
bkpt)
self.assertEqual(len(bkpt_threads), 1, "Second thread stopped")
success = thread.Resume()
self.assertTrue(success, "Couldn't resume a thread")
self.step_out_thread = breakpoint_threads[0]
# Step out of thread stopped at breakpoint
step_out_func()
| 40.590909 | 148 | 0.640378 | 731 | 6,251 | 5.321477 | 0.23803 | 0.07018 | 0.053728 | 0.037018 | 0.439332 | 0.424936 | 0.384062 | 0.37635 | 0.36144 | 0.344216 | 0 | 0.014155 | 0.265398 | 6,251 | 153 | 149 | 40.856209 | 0.83297 | 0.160294 | 0 | 0.392857 | 0 | 0 | 0.20242 | 0.004417 | 0 | 0 | 0 | 0 | 0.044643 | 1 | 0.080357 | false | 0 | 0.035714 | 0 | 0.133929 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ab41301f104514b07e34261351dc09c8857304c | 1,846 | py | Python | ParProcCo/nxdata_aggregation_mode.py | DiamondLightSource/ParProcCo | 75742278f567a36e7bb74bb7c00f98407270a8ac | [
"BSD-3-Clause"
] | null | null | null | ParProcCo/nxdata_aggregation_mode.py | DiamondLightSource/ParProcCo | 75742278f567a36e7bb74bb7c00f98407270a8ac | [
"BSD-3-Clause"
] | 5 | 2021-09-07T15:02:49.000Z | 2022-03-17T20:43:00.000Z | ParProcCo/nxdata_aggregation_mode.py | DiamondLightSource/ParProcCo | 75742278f567a36e7bb74bb7c00f98407270a8ac | [
"BSD-3-Clause"
] | 1 | 2021-12-07T08:50:48.000Z | 2021-12-07T08:50:48.000Z | from __future__ import annotations
import os
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Tuple
from ParProcCo.scheduler_mode_interface import SchedulerModeInterface
from ParProcCo.utils import check_jobscript_is_readable, check_location, format_timestamp, get_absolute_path
class NXdataAggregationMode(SchedulerModeInterface):
def __init__(self):
current_script_dir = Path(os.path.realpath(__file__)).parent.parent / "scripts"
self.program_path = current_script_dir / "nxdata_aggregate"
self.cores = 1
self.allowed_modules = ('python',)
def set_parameters(self, sliced_results: List[Path]) -> None:
"""Overrides SchedulerModeInterface.set_parameters"""
self.sliced_results = [str(res) for res in sliced_results]
self.number_jobs: int = 1
    def generate_output_paths(self, output_dir: Optional[Path], error_dir: Path, i: int, t: datetime) -> Tuple[str, str, str]:
"""Overrides SchedulerModeInterface.generate_output_paths"""
timestamp = format_timestamp(t)
output_file = "aggregated_results.nxs"
output_fp = str(output_dir / output_file) if output_dir else output_file
stdout_fp = str(error_dir / f"out_{timestamp}_aggregated")
stderr_fp = str(error_dir / f"err_{timestamp}_aggregated")
return output_fp, stdout_fp, stderr_fp
def generate_args(self, i: int, _memory: str, _cores: int, jobscript_args: List[str],
output_fp: str) -> Tuple[str, ...]:
"""Overrides SchedulerModeInterface.generate_args"""
assert(i == 0)
jobscript = str(check_jobscript_is_readable(check_location(get_absolute_path(jobscript_args[0]))))
args = tuple([jobscript, "--output", output_fp] + self.sliced_results)
return args
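# --- Usage sketch (added, illustrative only): exercising the path helpers.
# Assumptions: the paths below are hypothetical; normally a scheduler drives
# this class through the SchedulerModeInterface.
#
#   from datetime import datetime
#   mode = NXdataAggregationMode()
#   mode.set_parameters([Path('/tmp/slice_0.nxs'), Path('/tmp/slice_1.nxs')])
#   out_fp, stdout_fp, stderr_fp = mode.generate_output_paths(
#       Path('/tmp/out'), Path('/tmp/err'), 0, datetime.now())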
| 47.333333 | 135 | 0.717768 | 226 | 1,846 | 5.535398 | 0.358407 | 0.041567 | 0.040767 | 0.038369 | 0.129496 | 0.059153 | 0 | 0 | 0 | 0 | 0 | 0.002661 | 0.185807 | 1,846 | 38 | 136 | 48.578947 | 0.829674 | 0.080715 | 0 | 0 | 0 | 0 | 0.066032 | 0.044021 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.137931 | false | 0 | 0.241379 | 0 | 0.482759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ab4684941494cc38e495118298b853deaf00158 | 23,767 | py | Python | nuscenes2kitti/nuscenes2kitti_util.py | simon3dv/frustum-convnet | 73cffa8e53af8a4f59255591cf2ba4af6916602c | [
"MIT"
] | null | null | null | nuscenes2kitti/nuscenes2kitti_util.py | simon3dv/frustum-convnet | 73cffa8e53af8a4f59255591cf2ba4af6916602c | [
"MIT"
] | null | null | null | nuscenes2kitti/nuscenes2kitti_util.py | simon3dv/frustum-convnet | 73cffa8e53af8a4f59255591cf2ba4af6916602c | [
"MIT"
] | null | null | null | """ Helper methods for loading and parsing nuscenes2kitti data.
Author: Siming Fan
Acknowledgement: Charles R. Qi
Date: Jan 2020
"""
from __future__ import print_function
import os
import cv2
import numpy as np
import ipdb
import mayavi.mlab as mlab
class Object3d(object):
''' 3d object label '''
def __init__(self, label_file_line):
data = label_file_line.split(' ')
data[1:] = [float(x) for x in data[1:]]
# extract label, truncation, occlusion
self.type = data[0] # 'Car', 'Pedestrian', ...
self.truncation = data[1] # truncated pixel ratio [0..1]
self.occlusion = int(data[2]) # 0=visible, 1=partly occluded, 2=fully occluded, 3=unknown
self.alpha = data[3] # object observation angle [-pi..pi]
# extract 2d bounding box in 0-based coordinates
self.xmin = data[4] # left
self.ymin = data[5] # top
self.xmax = data[6] # right
self.ymax = data[7] # bottom
self.box2d = np.array([self.xmin,self.ymin,self.xmax,self.ymax])
# extract 3d bounding box information
self.h = data[8] # box height
self.w = data[9] # box width
self.l = data[10] # box length (in meters)
self.t = (data[11],data[12],data[13]) # location (x,y,z) in camera coord.
self.ry = data[14] # yaw angle (around Y-axis in camera coordinates) [-pi..pi]
if len(data)>15:
self.score = data[15]
def print_object(self):
print('Type, truncation, occlusion, alpha: %s, %d, %d, %f' % \
(self.type, self.truncation, self.occlusion, self.alpha))
print('2d bbox (x0,y0,x1,y1): %f, %f, %f, %f' % \
(self.xmin, self.ymin, self.xmax, self.ymax))
print('3d bbox h,w,l: %f, %f, %f' % \
(self.h, self.w, self.l))
print('3d bbox location, ry: (%f, %f, %f), %f' % \
(self.t[0],self.t[1],self.t[2],self.ry))
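# --- Usage sketch (added): parsing one illustrative KITTI-style label line.
# The numbers below are made up and only show the expected field layout.
#
#   line = 'Car 0.00 0 -1.58 587.0 173.3 614.1 200.1 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59'
#   obj = Object3d(line)
#   obj.print_object()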
class Calibration(object):
''' Calibration matrices and utils
3d XYZ in <label>.txt are in rect camera coord.
2d box xy are in image2 coord
Points in <lidar>.bin are in Velodyne coord.
y_image2 = P^2_rect * x_rect
y_image2 = P^2_rect * R0_rect * Tr_velo_to_cam * x_velo
x_ref = Tr_velo_to_cam * x_velo
x_rect = R0_rect * x_ref
P^2_rect = [f^2_u, 0, c^2_u, -f^2_u b^2_x;
0, f^2_v, c^2_v, -f^2_v b^2_y;
0, 0, 1, 0]
= K * [1|t]
image2 coord:
----> x-axis (u)
|
|
v y-axis (v)
velodyne coord:
front x, left y, up z
rect/ref camera coord:
right x, down y, front z
Ref (KITTI paper): http://www.cvlibs.net/publications/Geiger2013IJRR.pdf
TODO(rqi): do matrix multiplication only once for each projection.
P^2_rect = [f^2_u, 0, c^2_u, -f^2_u b^2_x;
0, f^2_v, c^2_v, -f^2_v b^2_y;
0, 0, 1, 0]
= K * [1|t]
P2:
P2: 7.070493000000e+02 0 6.040814000000e+02 4.575831000000e+01
0 7.070493000000e+02 1.805066000000e+02 -3.454157000000e-01
0 0 1 4.981016000000e-03
cam_intrinsic(CAM_FRONT):
CAM_FRONT:
1266.417203046554 0.0 816.2670197447984
0.0 1266.417203046554 491.50706579294757
0.0 0.0 1.0
'''
def __init__(self, calib_filepath, from_video=False, sensor_list = ['CAM_FRONT']):
if from_video:
calibs = self.read_calib_from_video(calib_filepath)
else:
calibs = self.read_calib_file(calib_filepath)
# Projection matrix from global coord to image2 coord
self.sensor_list = sensor_list
if 'CAM_FRONT' in self.sensor_list:
self.CAM_FRONT = np.reshape(calibs['CAM_FRONT'], [3, 3])
if 'CAM_BACK' in self.sensor_list:
self.CAM_BACK = np.reshape(calibs['CAM_BACK'], [3, 3])
if 'CAM_FRONT_LEFT' in self.sensor_list:
self.CAM_FRONT_LEFT = np.reshape(calibs['CAM_FRONT_LEFT'], [3, 3])
if 'CAM_BACK_LEFT' in self.sensor_list:
self.CAM_BACK_LEFT = np.reshape(calibs['CAM_BACK_LEFT'], [3, 3])
if 'CAM_FRONT_RIGHT' in self.sensor_list:
self.CAM_FRONT_RIGHT = np.reshape(calibs['CAM_FRONT_RIGHT'], [3, 3])
if 'CAM_BACK_RIGHT' in self.sensor_list:
self.CAM_BACK_RIGHT = np.reshape(calibs['CAM_BACK_RIGHT'], [3, 3])
self.lidar2ego_translation = np.reshape(calibs['lidar2ego_translation'], [3, 1])
self.lidar2ego_rotation = np.reshape(calibs['lidar2ego_rotation'], [3, 3])
self.ego2global_translation = np.reshape(calibs['ego2global_translation'], [3, 1])
self.ego2global_rotation = np.reshape(calibs['ego2global_rotation'], [3, 3])
for sensor in self.sensor_list:
for m in [ 'cam2ego_translation','ego2global_translation']:
attrt = sensor + '_'+ m
exec('self.'+attrt+' = np.reshape(calibs["'+attrt+'"],[3,1])')
for m in ['cam2ego_rotation','ego2global_rotation']:
attrt = sensor + '_'+ m
exec('self.'+attrt+' = np.reshape(calibs["'+attrt+'"],[3,3])')
#self.CAM_FRONT = calibs['CAM_FRONT']
#self.CAM_FRONT = np.reshape(self.CAM_FRONT, [3, 3])
# Rigid transform from Velodyne coord to reference camera coord
# self.V2C = calibs['Tr_velo_to_cam']
# self.V2C = np.reshape(self.V2C, [3,4])
# self.C2V = inverse_rigid_trans(self.V2C)
# Rotation from reference camera coord to rect camera coord
# self.R0 = calibs['R0_rect']
# self.R0 = np.reshape(self.R0,[3,3])
# Camera intrinsics and extrinsics
#self.c_u = self.CAM_FRONT[0,2]
#self.c_v = self.CAM_FRONT[1,2]
#self.f_u = self.CAM_FRONT[0,0]
#self.f_v = self.CAM_FRONT[1,1]
# self.b_x = self.P[0,3]/(-self.f_u) # relative
# self.b_y = self.P[1,3]/(-self.f_v)
def read_calib_file(self, filepath):
''' Read in a calibration file and parse into a dictionary.
Ref: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
'''
data = {}
with open(filepath, 'r') as f:
for line in f.readlines():
line = line.rstrip()
if len(line)==0: continue
key, value = line.split(':', 1)
# The only non-float values in these files are dates, which
# we don't care about anyway
try:
data[key] = np.array([float(x) for x in value.split()])
except ValueError:
pass
return data
def read_calib_from_video(self, calib_root_dir):
''' Read calibration for camera 2 from video calib files.
there are calib_cam_to_cam and calib_velo_to_cam under the calib_root_dir
'''
data = {}
cam2cam = self.read_calib_file(os.path.join(calib_root_dir, 'calib_cam_to_cam.txt'))
velo2cam = self.read_calib_file(os.path.join(calib_root_dir, 'calib_velo_to_cam.txt'))
Tr_velo_to_cam = np.zeros((3,4))
Tr_velo_to_cam[0:3,0:3] = np.reshape(velo2cam['R'], [3,3])
Tr_velo_to_cam[:,3] = velo2cam['T']
data['Tr_velo_to_cam'] = np.reshape(Tr_velo_to_cam, [12])
data['R0_rect'] = cam2cam['R_rect_00']
data['P2'] = cam2cam['P_rect_02']
return data
    def cart2hom(self, pts_3d):
        ''' Input: nx3 points in Cartesian
            Output: nx4 points in homogeneous coordinates, made by appending a column of ones
        '''
n = pts_3d.shape[0]
pts_3d_hom = np.hstack((pts_3d, np.ones((n,1))))
return pts_3d_hom
    # ===========================
    # ------- 3d to 3d ----------
    # ===========================
    # input:  3xn (one point per column)
    # output: 3xn
    # note: these methods expect 3xn, NOT nx3!
'''
def translate(self, points, x):
"""
Applies a translation to the point cloud.
:param x: <np.float: 3, 1>. Translation in x, y, z.
"""
for i in range(3):
points[i, :] = points[i, :] + x[i]
return points
'''
def translate(self, points, x):
"""
Applies a translation to the point cloud.
:param x: <np.float: 3, 1>. Translation in x, y, z.
"""
pts = points.copy()
for i in range(3):
pts[i, :] = pts[i, :] + x[i]
return pts
def rotate(self, points, rot_matrix):
"""
Applies a rotation.
:param rot_matrix: <np.float: 3, 3>. Rotation matrix.
"""
return np.dot(rot_matrix, points[:, :])
# ====lidar - ego(lidar) - global - ego_cam - cam====
def project_lidar_to_ego(self, pts_3d_velo):
pts_3d_ego = self.rotate(pts_3d_velo, getattr(self, 'lidar2ego_rotation'))
pts_3d_ego = self.translate(pts_3d_ego, getattr(self, 'lidar2ego_translation'))
return pts_3d_ego
def project_ego_to_lidar(self, pts_3d_ego):
pts_3d_velo = self.translate(pts_3d_ego, -getattr(self, 'lidar2ego_translation'))
pts_3d_velo = self.rotate(pts_3d_velo, getattr(self, 'lidar2ego_rotation').T)
return pts_3d_velo
def project_ego_to_global(self, pts_3d_ego):
pts_3d_global = self.rotate(pts_3d_ego, getattr(self, 'ego2global_rotation'))
pts_3d_global = self.translate(pts_3d_global, getattr(self, 'ego2global_translation'))
return pts_3d_global
def project_global_to_ego(self, pts_3d_global):
pts_3d_ego = self.translate(pts_3d_global, -getattr(self, 'ego2global_translation'))
pts_3d_ego = self.rotate(pts_3d_ego, getattr(self, 'ego2global_rotation').T)
return pts_3d_ego
def project_cam_to_ego(self, pts_3d_cam, sensor):
pts_3d_ego_cam = self.rotate(pts_3d_cam, getattr(self, sensor + '_' + 'cam2ego_rotation'))
pts_3d_ego_cam = self.translate(pts_3d_ego_cam, getattr(self,sensor+'_'+'cam2ego_translation'))
return pts_3d_ego_cam
def project_ego_to_cam(self, pts_3d_ego_cam, sensor):
pts_3d_cam = self.translate(pts_3d_ego_cam, -getattr(self,sensor+'_'+'cam2ego_translation'))
pts_3d_cam = self.rotate(pts_3d_cam, getattr(self, sensor + '_' + 'cam2ego_rotation').T)
return pts_3d_cam
def project_ego_to_global_cam(self, pts_3d_ego_cam, sensor):
pts_3d_global_cam = self.rotate(pts_3d_ego_cam, getattr(self, sensor + '_' + 'ego2global_rotation'))
pts_3d_global_cam = self.translate(pts_3d_global_cam, getattr(self,sensor+'_'+'ego2global_translation'))
return pts_3d_global_cam
def project_global_to_ego_cam(self, pts_3d_global_cam, sensor):
pts_3d_ego_cam = self.translate(pts_3d_global_cam, -getattr(self,sensor+'_'+'ego2global_translation'))
pts_3d_ego_cam = self.rotate(pts_3d_ego_cam, getattr(self, sensor + '_' + 'ego2global_rotation').T)
return pts_3d_ego_cam
# ====lidar - global - cam====
def project_global_to_lidar(self, pts_3d_global):
pts_3d_ego = self.project_global_to_ego(pts_3d_global)
pts_3d_velo = self.project_ego_to_lidar(pts_3d_ego)
return pts_3d_velo
def project_lidar_to_global(self, pts_3d_velo):
pts_3d_ego = self.project_lidar_to_ego(pts_3d_velo)
pts_3d_global = self.project_ego_to_global(pts_3d_ego)
return pts_3d_global
def project_cam_to_global(self, pts_3d_cam, sensor):
pts_3d_ego_cam = self.project_cam_to_ego(pts_3d_cam, sensor)
pts_3d_global_cam = self.project_ego_to_global_cam(pts_3d_ego_cam, sensor)
return pts_3d_global_cam
def project_global_to_cam(self, pts_3d_global_cam, sensor):
pts_3d_ego_cam = self.project_global_to_ego_cam(pts_3d_global_cam, sensor)
pts_3d_cam = self.project_ego_to_cam(pts_3d_ego_cam, sensor)
return pts_3d_cam
#=========intrinsic=========#
def project_image_to_cam(self, uv_depth, sensor):
''' Input: 3xn first two channels are uv, 3rd channel
is depth in rect camera coord.
Output: 3xn points in (rect) camera coord.
'''
# Camera intrinsics and extrinsics
c_u = getattr(self,sensor)[0,2]
c_v = getattr(self,sensor)[1,2]
f_u = getattr(self,sensor)[0,0]
f_v = getattr(self,sensor)[1,1]
n = uv_depth.shape[1]
x = ((uv_depth[0,:]-c_u)*uv_depth[2,:])/f_u
y = ((uv_depth[1,:]-c_v)*uv_depth[2,:])/f_v
pts_3d_cam = np.zeros((3,n))
pts_3d_cam[0,:] = x
pts_3d_cam[1,:] = y
pts_3d_cam[2,:] = uv_depth[2,:]
return pts_3d_cam
def project_cam_to_image(self, pts_3d_cam, sensor):
pts_2d = view_points(pts_3d_cam[:3, :], getattr(self,sensor), normalize=True)#(3,n)
return pts_2d
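    def project_lidar_to_image(self, pts_3d_velo, sensor):
        """ Convenience sketch (added for illustration; not part of the original
        API): chains lidar -> global -> camera -> image using the methods above.
        Assumes pts_3d_velo is 3xn and `sensor` names a loaded camera. """
        pts_3d_global = self.project_lidar_to_global(pts_3d_velo)
        pts_3d_cam = self.project_global_to_cam(pts_3d_global, sensor)
        return self.project_cam_to_image(pts_3d_cam, sensor)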
"""
def project_global_to_velo(self, pts_3d_global):
''' Input: nx3 points in rect camera coord.
Output: nx3 points in velodyne coord.
'''
#pts_3d_ref = self.project_rect_to_ref(pts_3d_rect)
#return self.project_ref_to_velo(pts_3d_ref)
pts_3d_velo = pts_3d_global[:,[0,2,1]]
pts_3d_velo[:,2] *= -1
return pts_3d_velo
"""
def rotx(t):
''' 3D Rotation about the x-axis. '''
c = np.cos(t)
s = np.sin(t)
return np.array([[1, 0, 0],
[0, c, -s],
[0, s, c]])
def roty(t):
''' Rotation about the y-axis. '''
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def rotz(t):
''' Rotation about the z-axis. '''
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
def transform_from_rot_trans(R, t):
    ''' Transformation matrix from rotation matrix and translation vector. '''
R = R.reshape(3, 3)
t = t.reshape(3, 1)
return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))
def inverse_rigid_trans(Tr):
    ''' Invert a rigid body transform matrix (3x4 as [R|t]):
        returns [R'|-R't] (the homogeneous 0|1 row is implicit)
    '''
inv_Tr = np.zeros_like(Tr) # 3x4
inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])
inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])
return inv_Tr
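def _inverse_rigid_trans_roundtrip_ok(Tr, pts):
    """ Illustrative self-check (not in the original file): applying a 3x4 [R|t]
    transform and then inverse_rigid_trans(Tr) should recover the nx3 points. """
    n = pts.shape[0]
    hom = np.hstack((pts, np.ones((n, 1))))                     # nx4 homogeneous
    fwd = np.dot(hom, Tr.T)                                     # nx3 transformed
    back = np.dot(np.hstack((fwd, np.ones((n, 1)))), inverse_rigid_trans(Tr).T)
    return np.allclose(back, pts)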
def read_label(label_filename):
lines = [line.rstrip() for line in open(label_filename)]
objects = [Object3d(line) for line in lines]
return objects
def load_image(img_filename):
return cv2.imread(img_filename)
def load_velo_scan(velo_filename):
scan = np.fromfile(velo_filename, dtype=np.float32)
scan = scan.reshape((-1, 4))
return scan
def project_to_image(pts_3d, P):
''' Project 3d points to image plane.
Usage: pts_2d = projectToImage(pts_3d, P)
input: pts_3d: nx3 matrix
P: 3x4 projection matrix
output: pts_2d: nx2 matrix
P(3x4) dot pts_3d_extended(4xn) = projected_pts_2d(3xn)
=> normalize projected_pts_2d(2xn)
<=> pts_3d_extended(nx4) dot P'(4x3) = projected_pts_2d(nx3)
=> normalize projected_pts_2d(nx2)
'''
n = pts_3d.shape[0]
pts_3d_extend = np.hstack((pts_3d, np.ones((n,1))))
    print('pts_3d_extend shape: {}'.format(pts_3d_extend.shape))
pts_2d = np.dot(pts_3d_extend, np.transpose(P)) # nx3
pts_2d[:,0] /= pts_2d[:,2]
pts_2d[:,1] /= pts_2d[:,2]
return pts_2d[:,0:2]
def view_points(points: np.ndarray, view: np.ndarray, normalize: bool) -> np.ndarray:
"""
    This is a helper function that maps 3d points to a 2d plane. It can be used to implement both perspective and
orthographic projections. It first applies the dot product between the points and the view. By convention,
the view should be such that the data is projected onto the first 2 axis. It then optionally applies a
normalization along the third dimension.
For a perspective projection the view should be a 3x3 camera matrix, and normalize=True
For an orthographic projection with translation the view is a 3x4 matrix and normalize=False
For an orthographic projection without translation the view is a 3x3 matrix (optionally 3x4 with last columns
all zeros) and normalize=False
:param points: <np.float32: 3, n> Matrix of points, where each point (x, y, z) is along each column.
:param view: <np.float32: n, n>. Defines an arbitrary projection (n <= 4).
The projection should be such that the corners are projected onto the first 2 axis.
:param normalize: Whether to normalize the remaining coordinate (along the third axis).
:return: <np.float32: 3, n>. Mapped point. If normalize=False, the third coordinate is the height.
"""
assert view.shape[0] <= 4
assert view.shape[1] <= 4
assert points.shape[0] == 3
viewpad = np.eye(4)
viewpad[:view.shape[0], :view.shape[1]] = view
nbr_points = points.shape[1]
# Do operation in homogenous coordinates.
points = np.concatenate((points, np.ones((1, nbr_points))))
points = np.dot(viewpad, points)
points = points[:3, :]
if normalize:
points = points / points[2:3, :].repeat(3, 0).reshape(3, nbr_points)
return points
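# Illustrative example of view_points (not in the original file). K below is a
# made-up 3x3 intrinsic matrix; real values come from the calibration files.
def _view_points_example():
    K = np.array([[700.0, 0.0, 600.0],
                  [0.0, 700.0, 180.0],
                  [0.0, 0.0, 1.0]])
    pts = np.array([[0.0, 1.0, 2.0],      # x (metres)
                    [0.0, 0.0, 0.0],      # y
                    [10.0, 10.0, 10.0]])  # z = depth
    uv = view_points(pts, K, normalize=True)  # 3xn; rows 0-1 are pixel u,v
    return uv[:2, :]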
def compute_box_3d(obj,view):
''' Takes an object and a projection matrix (P) and projects the 3d
bounding box into the image plane.
Returns:
corners_2d: (8,2) array in left image coord.
        corners_3d: (8,3) array in rect camera coord.
'''
# compute rotational matrix around yaw axis
R = roty(obj.ry)
# 3d bounding box dimensions
    l = obj.l
    w = obj.w
    h = obj.h
    # 3d bounding box corners
    x_corners = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2]
    y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
    z_corners = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2]
# x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
# y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
# z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
# rotate and translate 3d bounding box
corners_3d = np.dot(R, np.vstack([x_corners,y_corners,z_corners]))
#print corners_3d.shape
    corners_3d[0,:] = corners_3d[0,:] + obj.t[0]
    corners_3d[1,:] = corners_3d[1,:] + obj.t[1]
    corners_3d[2,:] = corners_3d[2,:] + obj.t[2]
#print 'cornsers_3d: ', corners_3d
# only draw 3d bounding box for objs in front of the camera
'''
if np.any(corners_3d[2,:]<0.1):
corners_2d = None
return corners_2d, np.transpose(corners_3d)
'''
# project the 3d bounding box into the image plane
# corners_2d = project_to_image(np.transpose(corners_3d), P);
#sensor = 'CAM_FRONT'
#view = getattr(calib,sensor)# 3x3
    corners_2d = view_points(corners_3d, view, normalize=True)[:2, :].T  # (8,2) pixel coordinates
#print 'corners_2d: ', corners_2d
return corners_2d, np.transpose(corners_3d)
def compute_orientation_3d(obj, view):
''' Takes an object and a projection matrix (P) and projects the 3d
object orientation vector into the image plane.
Returns:
orientation_2d: (2,2) array in left image coord.
        orientation_3d: (2,3) array in rect camera coord.
'''
# compute rotational matrix around yaw axis
R = roty(obj.ry)
# orientation in object coordinate system
orientation_3d = np.array([[0.0, obj.l],[0,0],[0,0]])
# rotate and translate in camera coordinate system, project in image
orientation_3d = np.dot(R, orientation_3d)
orientation_3d[0,:] = orientation_3d[0,:] + obj.t[0]
orientation_3d[1,:] = orientation_3d[1,:] + obj.t[1]
orientation_3d[2,:] = orientation_3d[2,:] + obj.t[2]
# vector behind image plane?
if np.any(orientation_3d[2,:]<0.1):
orientation_2d = None
return orientation_2d, np.transpose(orientation_3d)
# project orientation into the image plane
# orientation_2d = project_to_image(np.transpose(orientation_3d), P);
orientation_2d = view_points(orientation_3d, view, normalize=True)[:2, :]
return orientation_2d, np.transpose(orientation_3d)
def draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):
''' Draw 3d bounding box in image
qs: (8,3) array of vertices for the 3d box in following order:
1 -------- 0
/| /|
2 -------- 3 .
| | | |
. 5 -------- 4
|/ |/
6 -------- 7
'''
qs = qs.astype(np.int32)
for k in range(0,4):
# Ref: http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
i,j=k,(k+1)%4
# use LINE_AA for opencv3
cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.LINE_AA)
i,j=k+4,(k+1)%4 + 4
cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.LINE_AA)
i,j=k,k+4
cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.LINE_AA)
return image
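def draw_object_box(image, obj, view):
    """ Illustrative glue (not in the original file): projects an Object3d's 3d
    box with compute_box_3d and draws it with draw_projected_box3d. """
    corners_2d, _ = compute_box_3d(obj, view)
    if corners_2d is None:
        return image
    return draw_projected_box3d(image, corners_2d)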
def draw_nusc_lidar(pc, color=None, fig=None, bgcolor=(0, 0, 0), pts_scale=1, pts_mode='point', pts_color=None):
''' Draw lidar points
Args:
pc: numpy array (n,3) of XYZ
color: numpy array (n) of intensity or whatever
fig: mayavi figure handler, if None create new one otherwise will use it
Returns:
fig: created or used fig
'''
if fig is None: fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(1600, 1000))
if color is None: color = pc[:, 2]
mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], color, color=pts_color, mode=pts_mode, colormap='gnuplot',
scale_factor=pts_scale, figure=fig)
# draw origin
mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)
# draw axis
axes = np.array([
[2., 0., 0., 0.],
[0., 2., 0., 0.],
[0., 0., 2., 0.],
], dtype=np.float64)
mlab.plot3d([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0, 0), tube_radius=None, figure=fig)
mlab.plot3d([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1, 0), tube_radius=None, figure=fig)
mlab.plot3d([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0, 1), tube_radius=None, figure=fig)
# draw fov (todo: update to real sensor spec.)
fov = np.array([ # 45 degree
[20., 20., 0., 0.],
[-20., 20., 0., 0.],
], dtype=np.float64)
mlab.plot3d([0, fov[0, 0]], [0, fov[0, 1]], [0, fov[0, 2]], color=(1, 1, 1), tube_radius=None, line_width=1,
figure=fig)
mlab.plot3d([0, fov[1, 0]], [0, fov[1, 1]], [0, fov[1, 2]], color=(1, 1, 1), tube_radius=None, line_width=1,
figure=fig)
# draw square region
TOP_Y_MIN = 0#-20
TOP_Y_MAX = 40#20
TOP_X_MIN = -20#0
TOP_X_MAX = 20#40
TOP_Z_MIN = -2.0
TOP_Z_MAX = 0.4
x1 = TOP_X_MIN
x2 = TOP_X_MAX
y1 = TOP_Y_MIN
y2 = TOP_Y_MAX
mlab.plot3d([x1, x1], [y1, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)
mlab.plot3d([x2, x2], [y1, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)
mlab.plot3d([x1, x2], [y1, y1], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)
mlab.plot3d([x1, x2], [y2, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)
# mlab.orientation_axes()
mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991], distance=62.0, figure=fig)
    return fig
| 39.677796 | 118 | 0.596626 | 3,652 | 23,767 | 3.690307 | 0.133352 | 0.038213 | 0.018995 | 0.013059 | 0.38102 | 0.298731 | 0.26267 | 0.216814 | 0.195073 | 0.151369 | 0 | 0.063947 | 0.260445 | 23,767 | 599 | 119 | 39.677796 | 0.702793 | 0.307653 | 0 | 0.128814 | 0 | 0.00339 | 0.06654 | 0.014651 | 0 | 0 | 0 | 0.003339 | 0.010169 | 1 | 0.122034 | false | 0.00339 | 0.020339 | 0.00339 | 0.264407 | 0.023729 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ab5e36ae2ed9641a80722db087527c9239541f9 | 27,590 | py | Python | tools/utilities/pythonlibs/audio/training/train_classifier.py | awf/ELL | 25c94a1422efc41d5560db11b136f9d8f957ad41 | [
"MIT"
] | null | null | null | tools/utilities/pythonlibs/audio/training/train_classifier.py | awf/ELL | 25c94a1422efc41d5560db11b136f9d8f957ad41 | [
"MIT"
] | null | null | null | tools/utilities/pythonlibs/audio/training/train_classifier.py | awf/ELL | 25c94a1422efc41d5560db11b136f9d8f957ad41 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: train_classifier.py
# Authors: Chris Lovett
#
# Requires: Python 3.x
#
###################################################################################################
import argparse
import json
import math
import os
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torch.onnx
from torch.utils.data import Dataset, DataLoader
from training_config import TrainingConfig
class TriangularLR(optim.lr_scheduler._LRScheduler):
def __init__(self, optimizer, stepsize, lr_min, lr_max, gamma):
self.stepsize = stepsize
self.lr_min = lr_min
self.lr_max = lr_max
self.gamma = gamma
super(TriangularLR, self).__init__(optimizer)
def get_lr(self):
it = self.last_epoch
cycle = math.floor(1 + it / (2 * self.stepsize))
x = abs(it / self.stepsize - 2 * cycle + 1)
decayed_range = (self.lr_max - self.lr_min) * self.gamma ** (it / 3)
lr = self.lr_min + decayed_range * x
return [lr]
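def _triangular_lr_value(it, stepsize, lr_min, lr_max, gamma):
    """ Standalone restatement of TriangularLR.get_lr for a single iteration
    (illustrative only; mirrors the math of the class above). The rate bounces
    between lr_min and a peak whose range decays by gamma ** (it / 3). """
    cycle = math.floor(1 + it / (2 * stepsize))
    x = abs(it / stepsize - 2 * cycle + 1)
    decayed_range = (lr_max - lr_min) * gamma ** (it / 3)
    return lr_min + decayed_range * x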
class ExponentialResettingLR(optim.lr_scheduler._LRScheduler):
def __init__(self, optimizer, gamma, reset_epoch):
self.gamma = gamma
self.reset_epoch = int(reset_epoch)
super(ExponentialResettingLR, self).__init__(optimizer)
def get_lr(self):
epoch = self.last_epoch
if epoch > self.reset_epoch:
epoch -= self.reset_epoch
return [base_lr * self.gamma ** epoch
for base_lr in self.base_lrs]
class KeywordSpotter(nn.Module):
""" This baseclass provides the PyTorch Module pattern for defining and training keyword spotters """
def __init__(self, input_dim, num_keywords, batch_first=False):
"""
Initialize the KeywordSpotter with the following parameters:
input_dim - the size of the input audio frame in # samples
num_keywords - the number of predictions to come out of the model.
"""
super(KeywordSpotter, self).__init__()
self.input_dim = input_dim
self.num_keywords = num_keywords
self.batch_first = batch_first
self.init_hidden()
def name(self):
return "KeywordSpotter"
def init_hidden(self):
""" Clear any hidden state """
pass
def forward(self, input):
""" Perform the forward processing of the given input and return the prediction """
raise Exception("need to implement the forward method")
def export(self, name, device):
""" Export the model to the ONNX file format """
self.init_hidden()
dummy_input = Variable(torch.randn(1, 1, self.input_dim))
if device:
dummy_input = dummy_input.to(device)
torch.onnx.export(self, dummy_input, name, verbose=True)
def batch_accuracy(self, scores, labels):
""" Compute the training accuracy of the results of a single mini-batch """
batch_size = scores.shape[0]
passed = 0
results = []
for i in range(batch_size):
expected = labels[i]
actual = scores[i].argmax()
results += [int(actual)]
if expected == actual:
passed += 1
return (float(passed) * 100.0 / float(batch_size), passed, results)
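    def batch_accuracy_vectorized(self, scores, labels):
        """ Illustrative alternative (not in the original file): same result as
        batch_accuracy, but computed with tensor ops instead of a Python loop. """
        predictions = scores.argmax(dim=1)
        passed = int((predictions == labels).sum().item())
        batch_size = scores.shape[0]
        return (100.0 * float(passed) / float(batch_size), passed, predictions.tolist())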
def fit(self, training_data, validation_data, options, device=None, detail=False):
"""
Perform the training. This is not called "train" because the base class already defines
that method with a different meaning. The base class "train" method puts the Module into
"training mode".
"""
print("Training {} using {} rows of featurized training input...".format(self.name(), training_data.num_rows))
start = time.time()
loss_function = nn.NLLLoss()
initial_rate = options.learning_rate
lr_scheduler = options.lr_scheduler
oo = options.optimizer_options
if options.optimizer == "Adadelta":
optimizer = optim.Adadelta(self.parameters(), lr=initial_rate, weight_decay=oo.weight_decay,
rho=oo.rho, eps=oo.eps)
elif options.optimizer == "Adagrad":
optimizer = optim.Adagrad(self.parameters(), lr=initial_rate, weight_decay=oo.weight_decay,
lr_decay=oo.lr_decay)
elif options.optimizer == "Adam":
optimizer = optim.Adam(self.parameters(), lr=initial_rate, weight_decay=oo.weight_decay,
betas=oo.betas, eps=oo.eps)
elif options.optimizer == "Adamax":
optimizer = optim.Adamax(self.parameters(), lr=initial_rate, weight_decay=oo.weight_decay,
betas=oo.betas, eps=oo.eps)
elif options.optimizer == "ASGD":
optimizer = optim.ASGD(self.parameters(), lr=initial_rate, weight_decay=oo.weight_decay,
lambd=oo.lambd, alpha=oo.alpha, t0=oo.t0)
elif options.optimizer == "RMSprop":
optimizer = optim.RMSprop(self.parameters(), lr=initial_rate, weight_decay=oo.weight_decay,
eps=oo.eps, alpha=oo.alpha, momentum=oo.momentum, centered=oo.centered)
elif options.optimizer == "Rprop":
optimizer = optim.Rprop(self.parameters(), lr=initial_rate, etas=oo.etas,
step_sizes=oo.step_sizes)
elif options.optimizer == "SGD":
optimizer = optim.SGD(self.parameters(), lr=initial_rate, weight_decay=oo.weight_decay,
momentum=oo.momentum, dampening=oo.dampening)
print(optimizer)
num_epochs = options.max_epochs
batch_size = options.batch_size
learning_rate = options.learning_rate
lr_min = options.lr_min
lr_peaks = options.lr_peaks
ticks = training_data.num_rows / batch_size # iterations per epoch
total_iterations = ticks * num_epochs
gamma = options.lr_gamma
if not lr_min:
lr_min = learning_rate
scheduler = None
if lr_scheduler == "TriangleLR":
steps = lr_peaks * 2 + 1
stepsize = num_epochs / steps
scheduler = TriangularLR(optimizer, stepsize * ticks, lr_min, learning_rate, gamma)
elif lr_scheduler == "CosineAnnealingLR":
# divide by odd number to finish on the minimum learning rate
cycles = lr_peaks * 2 + 1
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_iterations / cycles,
eta_min=lr_min)
elif lr_scheduler == "ExponentialLR":
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma)
elif lr_scheduler == "StepLR":
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=options.lr_step_size, gamma=gamma)
elif lr_scheduler == "ExponentialResettingLR":
reset = (num_epochs * ticks) / 3 # reset at the 1/3 mark.
scheduler = ExponentialResettingLR(optimizer, gamma, reset)
# optimizer = optim.Adam(model.parameters(), lr=0.0001)
log = []
for epoch in range(num_epochs):
self.train()
iteration = 0
for i_batch, (audio, labels) in enumerate(training_data.get_data_loader(batch_size)):
if not self.batch_first:
audio = audio.transpose(1, 0) # GRU wants seq,batch,feature
if device:
audio = audio.to(device)
labels = labels.to(device)
# Also, we need to clear out the hidden state,
# detaching it from its history on the last instance.
self.init_hidden()
# Before the backward pass, use the optimizer object to zero all of the
# gradients for the variables it will update (which are the learnable
# weights of the model). This is because by default, gradients are
# accumulated in buffers( i.e, not overwritten) whenever .backward()
# is called. Checkout docs of torch.autograd.backward for more details.
optimizer.zero_grad()
# Run our forward pass.
keyword_scores = self(audio)
# Compute the loss, gradients
loss = loss_function(keyword_scores, labels)
# Backward pass: compute gradient of the loss with respect to all the learnable
# parameters of the model. Internally, the parameters of each Module are stored
# in Tensors with requires_grad=True, so this call will compute gradients for
# all learnable parameters in the model.
loss.backward()
# move to next learning rate
if scheduler:
scheduler.step()
# Calling the step function on an Optimizer makes an update to its parameters
# applying the gradients we computed during back propagation
optimizer.step()
                learning_rate = optimizer.param_groups[0]['lr']
                if detail:
                    log += [{'iteration': iteration, 'loss': loss.item(), 'learning_rate': learning_rate}]
iteration += 1
            # Find the best prediction in each sequence and return its accuracy
passed, total, rate = self.evaluate(validation_data, batch_size, device)
learning_rate = optimizer.param_groups[0]['lr']
print("Epoch {}, Loss {}, Validation Accuracy {:.3f}, Learning Rate {}".format(
epoch, loss.item(), rate * 100, learning_rate))
log += [{'epoch': epoch, 'loss': loss.item(), 'accuracy': rate, 'learning_rate': learning_rate}]
end = time.time()
print("Trained in {:.2f} seconds".format(end - start))
return log
def evaluate(self, test_data, batch_size, device=None, outfile=None):
"""
Evaluate the given test data and print the pass rate
"""
self.eval()
passed = 0
total = 0
self.zero_grad()
results = []
with torch.no_grad():
for i_batch, (audio, labels) in enumerate(test_data.get_data_loader(batch_size)):
batch_size = audio.shape[0]
                if not self.batch_first:
                    audio = audio.transpose(1, 0)  # GRU wants seq,batch,feature
if device:
audio = audio.to(device)
labels = labels.to(device)
total += batch_size
self.init_hidden()
keyword_scores = self(audio)
last_accuracy, ok, actual = self.batch_accuracy(keyword_scores, labels)
results += actual
passed += ok
if outfile:
print("Saving evaluation results in '{}'".format(outfile))
with open(outfile, "w") as f:
json.dump(results, f)
return (passed, total, passed / total)
class GRUKeywordSpotter(KeywordSpotter):
"""This class is a PyTorch Module that implements a 1, 2 or 3 layer GRU based audio classifier"""
def __init__(self, input_dim, num_keywords, hidden_dim, num_layers):
"""
Initialize the KeywordSpotter with the following parameters:
input_dim - the size of the input audio frame in # samples.
hidden_dim - the size of the hidden state of the GRU nodes
num_keywords - the number of predictions to come out of the model.
num_layers - the number of GRU layers to use (1, 2 or 3)
"""
self.hidden_dim = hidden_dim
self.num_layers = num_layers
super(GRUKeywordSpotter, self).__init__(input_dim, num_keywords)
# The GRU takes audio sequences as input, and outputs hidden states
# with dimensionality hidden_dim.
self.gru1 = nn.GRU(input_dim, hidden_dim)
self.gru2 = None
if num_layers > 1:
self.gru2 = nn.GRU(hidden_dim, hidden_dim)
self.gru3 = None
last_output_size = hidden_dim
if num_layers > 2:
self.gru3 = nn.GRU(hidden_dim, num_keywords)
last_output_size = num_keywords
# The linear layer is a fully connected layer that maps from hidden state space
# to number of expected keywords
self.hidden2keyword = nn.Linear(last_output_size, num_keywords)
self.init_hidden()
def name(self):
return "{} layer GRU {}".format(self.num_layers, self.hidden_dim)
def init_hidden(self):
""" Clear the hidden state for the GRU nodes """
self.hidden1 = None
self.hidden2 = None
self.hidden3 = None
def forward(self, input):
""" Perform the forward processing of the given input and return the prediction """
# input is shape: [seq,batch,feature]
gru_out, self.hidden1 = self.gru1(input, self.hidden1)
if self.gru2 is not None:
gru_out, self.hidden2 = self.gru2(gru_out, self.hidden2)
if self.gru3 is not None:
gru_out, self.hidden3 = self.gru3(gru_out, self.hidden3)
keyword_space = self.hidden2keyword(gru_out)
result = F.log_softmax(keyword_space, dim=2)
# return the mean across the sequence length to produce the
# best prediction of which word exists in that sequence.
# we can do that because we know each window_size sequence in
# the training dataset contains at most one word.
result = result.mean(dim=0)
return result
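def _gru_shape_check():
    """ Illustrative smoke test (not in the original file): verifies the model
    maps a [seq, batch, feature] tensor to [batch, num_keywords] log-probs.
    The sizes below (80 features, 10 keywords) are made up. """
    model = GRUKeywordSpotter(input_dim=80, num_keywords=10, hidden_dim=64, num_layers=2)
    scores = model(torch.randn(40, 8, 80))
    assert scores.shape == (8, 10)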
class LSTMKeywordSpotter(KeywordSpotter):
"""This class is a PyTorch Module that implements a 1, 2 or 3 layer LSTM based audio classifier"""
def __init__(self, input_dim, num_keywords, hidden_dim, num_layers):
"""
Initialize the KeywordSpotter with the following parameters:
input_dim - the size of the input audio frame in # samples.
hidden_dim - the size of the hidden state of the LSTM nodes
num_keywords - the number of predictions to come out of the model.
num_layers - the number of LSTM layers to use (1, 2 or 3)
"""
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.input_dim = input_dim
super(LSTMKeywordSpotter, self).__init__(input_dim, num_keywords)
# The LSTM takes audio sequences as input, and outputs hidden states
# with dimensionality hidden_dim.
self.lstm1 = nn.LSTM(input_dim, hidden_dim)
self.lstm2 = None
if num_layers > 1:
self.lstm2 = nn.LSTM(hidden_dim, hidden_dim)
self.lstm3 = None
last_output_size = hidden_dim
if num_layers > 2:
# layer 3 can reduce output to num_keywords, this makes for a smaller
# layer and a much smaller Linear layer below so we get some of the
# size back.
self.lstm3 = nn.LSTM(hidden_dim, num_keywords)
last_output_size = num_keywords
# The linear layer is a fully connected layer that maps from hidden state space
# to number of expected keywords
self.hidden2keyword = nn.Linear(last_output_size, num_keywords)
self.init_hidden()
def name(self):
return "{} layer LSTM {}".format(self.num_layers, self.hidden_dim)
def init_hidden(self):
""" Clear the hidden state for the LSTM nodes """
self.hidden1 = None
self.hidden2 = None
self.hidden3 = None
def forward(self, input):
""" Perform the forward processing of the given input and return the prediction """
# input is shape: [seq,batch,feature]
lstm_out, self.hidden1 = self.lstm1(input, self.hidden1)
if self.lstm2 is not None:
lstm_out, self.hidden2 = self.lstm2(lstm_out, self.hidden2)
if self.lstm3 is not None:
lstm_out, self.hidden3 = self.lstm3(lstm_out, self.hidden3)
keyword_space = self.hidden2keyword(lstm_out)
result = F.log_softmax(keyword_space, dim=2)
# return the mean across the sequence length to produce the
# best prediction of which word exists in that sequence.
# we can do that because we know each window_size sequence in
# the training dataset contains at most one word.
result = result.mean(dim=0)
return result
class AudioDataset(Dataset):
"""
Featurized Audio in PyTorch Dataset so we can get a DataLoader that is needed for
mini-batch training.
"""
def __init__(self, filename, keywords):
""" Initialize the AudioDataset from the given *.npz file """
self.dataset = np.load(filename)
# get parameters saved by make_dataset.py
parameters = self.dataset["parameters"]
self.sample_rate = int(parameters[0])
self.audio_size = int(parameters[1])
self.input_size = int(parameters[2])
self.window_size = int(parameters[3])
self.shift = int(parameters[4])
self.features = self.dataset["features"].astype(np.float32)
self.num_rows = len(self.features)
self.features = self.features.reshape((self.num_rows, self.window_size, self.input_size))
self.label_names = self.dataset["labels"]
self.keywords = keywords
self.num_keywords = len(self.keywords)
self.labels = self.to_long_vector()
msg = "Loaded dataset {} and found sample rate {}, audio_size {}, input_size {}, window_size {} and shift {}"
print(msg.format(os.path.basename(filename), self.sample_rate, self.audio_size, self.input_size,
self.window_size, self.shift))
def get_data_loader(self, batch_size):
""" Get a DataLoader that can enumerate shuffled batches of data in this dataset """
return DataLoader(self, batch_size=batch_size, shuffle=True, drop_last=True)
def to_long_vector(self):
""" convert the expected labels to a list of integer indexes into the array of keywords """
indexer = [(0 if x == "<null>" else self.keywords.index(x)) for x in self.label_names]
return np.array(indexer, dtype=np.longlong)
def __len__(self):
""" Return the number of rows in this Dataset """
return self.num_rows
def __getitem__(self, idx):
""" Return a single labelled sample here as a tuple """
audio = self.features[idx] # batch index is second dimension
label = self.labels[idx]
sample = (audio, label)
return sample
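# Illustrative use of the dataset (paths and the keyword list are assumptions):
#   data = AudioDataset('training_list.npz', keywords)
#   for audio, label in data.get_data_loader(batch_size=32):
#       ...  # audio: [batch, window_size, input_size], label: [batch]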
def create_model(arch, input_size, num_keywords, hidden_units, num_layers):
if arch == "GRU":
return GRUKeywordSpotter(input_size, num_keywords, hidden_units, num_layers)
elif arch == "LSTM":
return LSTMKeywordSpotter(input_size, num_keywords, hidden_units, num_layers)
else:
raise Exception("Model architecture '{}' not supported".format(arch))
def save_json(obj, filename):
with open(filename, "w") as f:
json.dump(obj, f, indent=2)
def train(config, evaluate_only=False, outdir=".", detail=False):
filename = config.model.filename
categories_file = config.dataset.categories
wav_directory = config.dataset.path
batch_size = config.training.batch_size
hidden_units = config.model.hidden_units
architecture = config.model.architecture
num_layers = config.model.num_layers
use_gpu = config.training.use_gpu
valid_layers = [1, 2, 3]
if num_layers not in valid_layers:
raise Exception("--num_layers can only be one of these values {}".format(valid_layers))
if not os.path.isdir(outdir):
os.makedirs(outdir)
if not filename:
filename = "{}{}KeywordSpotter.pt".format(architecture, hidden_units)
config.model.filename = filename
# load the featurized data
if not os.path.isdir(wav_directory):
print("### Error: please specify valid --dataset folder location: {}".format(wav_directory))
sys.exit(1)
if not categories_file:
categories_file = os.path.join(wav_directory, "categories.txt")
with open(categories_file, "r") as f:
keywords = [x.strip() for x in f.readlines()]
training_file = os.path.join(wav_directory, "training_list.npz")
testing_file = os.path.join(wav_directory, "testing_list.npz")
validation_file = os.path.join(wav_directory, "validation_list.npz")
if not os.path.isfile(training_file):
print("Missing file {}".format(training_file))
print("Please run make_datasets.py")
sys.exit(1)
if not os.path.isfile(validation_file):
print("Missing file {}".format(validation_file))
print("Please run make_datasets.py")
sys.exit(1)
if not os.path.isfile(testing_file):
print("Missing file {}".format(testing_file))
print("Please run make_datasets.py")
sys.exit(1)
model = None
device = torch.device("cpu")
if use_gpu:
if torch.cuda.is_available():
device = torch.device("cuda")
else:
print("### CUDA not available!!")
print("Loading {}...".format(testing_file))
test_data = AudioDataset(testing_file, keywords)
log = None
if not evaluate_only:
print("Loading {}...".format(training_file))
training_data = AudioDataset(training_file, keywords)
print("Loading {}...".format(validation_file))
validation_data = AudioDataset(validation_file, keywords)
print("Training model {}".format(filename))
model = create_model(architecture, training_data.input_size, training_data.num_keywords, hidden_units,
num_layers)
if device.type == 'cuda':
model.cuda() # move the processing to GPU
start = time.time()
log = model.fit(training_data, validation_data, config.training, device, detail)
end = time.time()
passed, total, rate = model.evaluate(training_data, batch_size, device)
print("Training accuracy = {:.3f} %".format(rate * 100))
torch.save(model.state_dict(), os.path.join(outdir, filename))
print("Evaluating {} keyword spotter using {} rows of featurized test audio...".format(
architecture, test_data.num_rows))
if model is None:
msg = "Loading trained model with input size {}, hidden units {} and num keywords {}"
print(msg.format(test_data.input_size, hidden_units, test_data.num_keywords))
model = create_model(architecture, test_data.input_size, test_data.num_keywords, hidden_units, num_layers)
model.load_state_dict(torch.load(filename))
if model and device.type == 'cuda':
model.cuda() # move the processing to GPU
results_file = os.path.join(outdir, "results.txt")
passed, total, rate = model.evaluate(test_data, batch_size, device, results_file)
print("Testing accuracy = {:.3f} %".format(rate * 100))
if not evaluate_only:
name = os.path.splitext(filename)[0] + ".onnx"
print("saving onnx file: {}".format(name))
model.export(os.path.join(outdir, name), device)
config.dataset.sample_rate = test_data.sample_rate
config.dataset.input_size = test_data.audio_size
config.dataset.num_filters = test_data.input_size
config.dataset.window_size = test_data.window_size
config.dataset.shift = test_data.shift
logdata = {
"accuracy_val": rate,
"training_time": end - start,
"log": log
}
d = TrainingConfig.to_dict(config)
logdata.update(d)
logname = os.path.join(outdir, "train_results.json")
save_json(logdata, logname)
return rate, log
def str2bool(s):
s = s.lower()
return s in ["t", "true", "yes", "1"]
if __name__ == '__main__':
config = TrainingConfig()
parser = argparse.ArgumentParser("train a GRU based neural network for keyword spotting")
# all the training parameters
parser.add_argument("--epochs", help="Number of epochs to train", type=int)
parser.add_argument("--lr_scheduler", help="Type of learning rate scheduler (None, TriangleLR, CosineAnnealingLR,"
" ExponentialLR, ExponentialResettingLR)")
parser.add_argument("--learning_rate", help="Default learning rate, and maximum for schedulers", type=float)
parser.add_argument("--lr_min", help="Minimum learning rate for the schedulers", type=float)
parser.add_argument("--lr_peaks", help="Number of peaks for triangle and cosine schedules", type=float)
parser.add_argument("--batch_size", "-bs", help="Batch size of training", type=int)
parser.add_argument("--architecture", help="Specify model architecture (GRU, LSTM)")
parser.add_argument("--num_layers", type=int, help="Number of RNN layers (1, 2 or 3)")
parser.add_argument("--hidden_units", "-hu", type=int, help="Number of hidden units in the GRU layers")
parser.add_argument("--use_gpu", help="Whether to use GPU for training")
# or you can just specify an options file.
parser.add_argument("--config", help="Use json file containing all these options (as per 'training_config.py')")
# and some additional stuff ...
parser.add_argument("--eval", "-e", help="No training, just evaluate existing model", action='store_true')
parser.add_argument("--filename", "-o", help="Name of model file to generate")
parser.add_argument("--categories", "-c", help="Name of file containing keywords")
parser.add_argument("--dataset", "-a", help="Path to the audio folder containing 'training.npz' file")
parser.add_argument("--outdir", help="Folder in which to store output file and log files")
parser.add_argument("--detail", "-d", help="Save loss info for every iteration not just every epoch",
action="store_true")
args = parser.parse_args()
if args.config:
config.load(args.config)
# then any user defined options overrides these defaults
if args.epochs:
config.training.max_epochs = args.epochs
if args.learning_rate:
config.training.learning_rate = args.learning_rate
if args.lr_min:
config.training.lr_min = args.lr_min
if args.lr_peaks:
config.training.lr_peaks = args.lr_peaks
if args.lr_scheduler:
config.training.lr_scheduler = args.lr_scheduler
if args.batch_size:
config.training.batch_size = args.batch_size
if args.architecture:
config.model.architecture = args.architecture
if args.num_layers:
config.model.num_layers = args.num_layers
if args.hidden_units:
config.model.hidden_units = args.hidden_units
if args.filename:
config.model.filename = args.filename
if args.use_gpu:
config.training.use_gpu = str2bool(args.use_gpu)
if args.categories:
config.dataset.categories = args.categories
if args.dataset:
config.dataset.path = args.dataset
if not os.path.isfile("config.json"):
config.save("config.json")
train(config, args.eval, args.outdir, args.detail)
| 42.251149 | 118 | 0.630663 | 3,459 | 27,590 | 4.877999 | 0.144261 | 0.014402 | 0.017128 | 0.010905 | 0.336929 | 0.28999 | 0.257868 | 0.223256 | 0.210573 | 0.210573 | 0 | 0.006968 | 0.266546 | 27,590 | 652 | 119 | 42.315951 | 0.826843 | 0.178036 | 0 | 0.19457 | 0 | 0 | 0.108033 | 0.003938 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065611 | false | 0.022624 | 0.033937 | 0.006787 | 0.153846 | 0.052036 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ab88c3b900f97a77dbc9d68ec55937bb1824ddc | 5,308 | py | Python | gs_quant/test/models/test_risk_model.py | mlize/gs-quant | 13aba5c362f4f9f8a78ca9288c5a3e026160ce55 | [
"Apache-2.0"
] | 2 | 2021-06-22T12:14:38.000Z | 2021-06-23T15:51:08.000Z | gs_quant/test/models/test_risk_model.py | mlize/gs-quant | 13aba5c362f4f9f8a78ca9288c5a3e026160ce55 | [
"Apache-2.0"
] | null | null | null | gs_quant/test/models/test_risk_model.py | mlize/gs-quant | 13aba5c362f4f9f8a78ca9288c5a3e026160ce55 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2021 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import pytest
from unittest import mock
from gs_quant.models.risk_model import FactorRiskModel
from gs_quant.session import *
from gs_quant.target.risk_models import RiskModel as Risk_Model, CoverageType, Term, UniverseIdentifier, Entitlements
empty_entitlements = {
"execute": [],
"edit": [],
"view": [],
"admin": [],
"query": [],
"upload": []
}
mock_risk_model_obj = Risk_Model(
id_='model_id',
name='Fake Risk Model',
coverage=CoverageType.Country,
term=Term.Long,
universe_identifier=UniverseIdentifier.gsid,
vendor='GS',
version=1.0,
entitlements=empty_entitlements
)
def mock_risk_model(mocker):
from gs_quant.session import OAuth2Session
OAuth2Session.init = mock.MagicMock(return_value=None)
GsSession.use(Environment.QA, 'client_id', 'secret')
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_post', return_value=mock_risk_model_obj)
mocker.patch.object(GsSession.current, '_get', return_value=mock_risk_model_obj)
mocker.patch.object(GsSession.current, '_put', return_value=mock_risk_model_obj)
return FactorRiskModel('model_id')
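def _patch_get(mocker, model):
    """ Small helper sketch (not in the original file): the tests below repeat
    this pattern, re-patching the session GET so that reads after update()
    return the locally modified model, mimicking a server round trip. """
    mocker.patch.object(GsSession.current, '_get', return_value=model)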
def test_create_risk_model(mocker):
mock_risk_model(mocker)
risk_model_id = 'model_id'
mocker.patch.object(GsSession.current, '_get', return_value=mock_risk_model_obj)
new_model = FactorRiskModel.create(
coverage=CoverageType.Country,
id_=risk_model_id,
name='Fake Risk Model',
term=Term.Long,
universe_identifier=UniverseIdentifier.gsid,
vendor='GS',
version=1.0
)
assert new_model.id == risk_model_id
def test_update_risk_model_entitlements(mocker):
new_model = mock_risk_model(mocker)
new_entitlements = {
"execute": ['guid:X'],
"edit": [],
"view": [],
"admin": [],
"query": [],
"upload": []
}
new_model.add_entitlements(new_entitlements)
new_model.update()
assert 'guid:X' in new_model.entitlements.execute
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
new_model.remove_entitlements(new_entitlements)
new_model.update()
assert 'guid:X' not in new_model.entitlements.execute
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
new_model.add_entitlements(Entitlements.from_dict(new_entitlements))
new_model.update()
assert 'guid:X' in new_model.entitlements.execute
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
new_model.remove_entitlements(Entitlements.from_dict(new_entitlements))
new_model.update()
assert 'guid:X' not in new_model.entitlements.execute
new_entitlements = {
"execute": ['guid:X'],
"edit": [],
"view": [],
"admin": ['guid:XX'],
"query": [],
"upload": ['guid:XXX']
}
new_model.entitlements = Entitlements.from_dict(new_entitlements)
new_model.update()
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
assert 'guid:X' in new_model.entitlements.execute
assert 'guid:XX' in new_model.entitlements.admin
assert 'guid:XXX' in new_model.entitlements.upload
def test_update_risk_model(mocker):
new_model = mock_risk_model(mocker)
new_model.term = Term.Short
new_model.update()
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
assert new_model.term == Term.Short
new_model.description = 'Test risk model'
new_model.update()
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
assert new_model.description == 'Test risk model'
new_model.vendor = 'GS'
new_model.update()
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
assert new_model.vendor == 'GS'
new_model.term = Term.Medium
new_model.update()
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
assert new_model.term == Term.Medium
new_model.version = 0.1
new_model.update()
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
assert new_model.version == 0.1
new_model.coverage = CoverageType.Global
new_model.update()
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
assert new_model.coverage == CoverageType.Global
new_model.name = 'TEST RISK MODEL'
new_model.update()
mocker.patch.object(GsSession.current, '_get', return_value=new_model)
assert new_model.name == 'TEST RISK MODEL'
if __name__ == "__main__":
pytest.main([__file__])
| 32.765432 | 117 | 0.707611 | 681 | 5,308 | 5.259912 | 0.204112 | 0.11837 | 0.075935 | 0.116136 | 0.670017 | 0.624511 | 0.593244 | 0.552764 | 0.487437 | 0.441374 | 0 | 0.004136 | 0.180106 | 5,308 | 161 | 118 | 32.968944 | 0.818934 | 0.103994 | 0 | 0.479675 | 0 | 0 | 0.084035 | 0 | 0 | 0 | 0 | 0 | 0.121951 | 1 | 0.03252 | false | 0 | 0.04878 | 0 | 0.089431 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aba8c0e3d667f47418c6c76fe22ef18aaf9a8a4 | 2,675 | py | Python | Server/Python/utils/dbs3_logfile_parser_for_failed_blocks.py | vkuznet/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 8 | 2015-08-14T04:01:32.000Z | 2021-06-03T00:56:42.000Z | Server/Python/utils/dbs3_logfile_parser_for_failed_blocks.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 162 | 2015-01-07T21:34:47.000Z | 2021-10-13T09:42:41.000Z | Server/Python/utils/dbs3_logfile_parser_for_failed_blocks.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 16 | 2015-01-22T15:27:29.000Z | 2021-04-28T09:23:28.000Z | #!/usr/bin/env python
"""
Script to find block names failed with block already in DBS error in cmsweb logs
To just find duplicated block names, one can use the following command in bash
grep -o -P \"(?<=Block name:\s)\S+$\" dbs-20130521.log | uniq -d
Not yet working since log files are not consecutive at the moment
"""
from __future__ import print_function
from optparse import OptionParser
import re
def get_command_line_options():
parser = OptionParser(usage='%prog --log=<file.txt>')
parser.add_option("-l", "--log", dest="logfile", help="CMSWEB logfile", metavar="file.txt")
(options, args) = parser.parse_args()
if not (options.logfile):
parser.print_help()
parser.error('Mandatory options are --log')
return options, args
def find_status_code(iterator):
    # iterate instead of calling next() bare, so that reaching end-of-file
    # returns None rather than raising StopIteration into the caller
    for log_line in iterator:
        match_obj = log_pattern.match(log_line)
        try:
            match_dict = match_obj.groupdict()
        except AttributeError:
            pass
        else:
            if match_dict['request'] == 'POST /dbs/prod/global/DBSWriter/bulkblocks HTTP/1.1':
                return match_dict['status']
    return None
if __name__ == '__main__':
options, args = get_command_line_options()
log_parts = [r'^INFO:cherrypy.access:\[(?P<time>\S+)\]', # time
r'(?P<host>\S+)', # host
r'(?P<ip>\S+)', # ip
r'"(?P<request>.+)"', # request
r'(?P<status>[0-9]+)', # status
r'\[data:\s(?P<data_in>\S+)\sin\s(?P<data_out>\S+)\sout\s(?P<data_us>\S+)\sus\s]', # data
r'\[auth:\sOK\s"(?P<dn>.*?)"\s"(?P<dontknow>.*?)"\s\]', # auth data
r'\[ref:\s"(?P<referer>.*?)"\s"(?P<agent>.*?)"\s\]' # referer and agent
]
log_pattern = re.compile(r'\s+'.join(log_parts)+r'\s*\Z')
block_regex = re.compile(r'^Block name: (?P<block_name>\S+)$')
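    # Illustrative shapes of the two kinds of lines matched above (reconstructed
    # from the patterns; real cmsweb log lines differ in detail):
    #   Block name: /SomeDataset/SomeProcessing-v1/TIER#some-block-uid
    #   INFO:cherrypy.access:[time] host ip "POST ... HTTP/1.1" 200 [data: ...] [auth: OK "dn" "x" ] [ref: "r" "a" ]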
with open(options.logfile, 'r') as f:
read_lines = (read_line.strip() for read_line in f)
for line in read_lines:
match_obj = block_regex.match(line)
if match_obj:
status_code = find_status_code(read_lines)
                if status_code is not None and status_code != '200':
print(match_obj.groupdict()['block_name'])
print(status_code)
| 42.460317 | 119 | 0.500935 | 318 | 2,675 | 4.040881 | 0.424528 | 0.010895 | 0.014008 | 0.017121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008711 | 0.356262 | 2,675 | 62 | 120 | 43.145161 | 0.737515 | 0.138692 | 0 | 0 | 0 | 0.022727 | 0.213036 | 0.110674 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.022727 | 0.068182 | 0 | 0.159091 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2abaa582f6018627a222448cffa15bfe0e369cb6 | 11,644 | py | Python | hr_edl_data/tournament.py | lanctot/hr_edl_experiments | 52955c2349792f81d781a0b4a9e4ded8d68e5769 | [
"MIT"
] | 1 | 2021-08-21T05:41:23.000Z | 2021-08-21T05:41:23.000Z | hr_edl_data/tournament.py | lanctot/hr_edl_experiments | 52955c2349792f81d781a0b4a9e4ded8d68e5769 | [
"MIT"
] | null | null | null | hr_edl_data/tournament.py | lanctot/hr_edl_experiments | 52955c2349792f81d781a0b4a9e4ded8d68e5769 | [
"MIT"
] | 2 | 2021-06-07T12:40:32.000Z | 2021-08-21T05:41:32.000Z | import numpy as np
import pandas as pd
import matplotlib.lines as mlines
import seaborn as sns
import hr_edl_data.experiment_parameters as xp
# Algorithm labels
_alg_label_map = {
'CFR': r'$\\\text{CF}$',
'A-EFR': r'$\\\text{ACT}$',
'CFR_IN': r'$\\\text{CF}_{\text{IN}}$',
'A-EFR_IN': r'$\\\text{ACT}_{\text{IN}}$',
'CSPS-EFR': r'$\\\text{CSPS}$',
'CFPS-EFR': r'$\\\text{CFPS}$',
'CFPS-EFR_EX+IN': r'$\\\text{CFPS}_{\text{EX} + \text{IN}}$',
'TIPS-EFR': r'$\\\text{TIPS}$',
'TIPS-EFR_EX+IN': r'$\\\text{TIPS}_{\text{EX} + \text{IN}}$',
'CFR_EX+IN': r'$\\\text{CF}_{\text{EX} + \text{IN}}$',
'BPS-EFR': r'$\\\text{BPS}$',
'BEHAV-EFR': r'$\\\text{BHV}$',
}
def alg_label(tag):
if tag in _alg_label_map:
return _alg_label_map[tag]
return tag
# Algorithm ordering and sorting
_alg_order_map = {
'CFR': 0,
'CFR+': 1,
'A-EFR': 6,
'CFR_IN': 8,
'A-EFR_IN': -1,
'CSPS-EFR': 20,
'CFPS-EFR': 16,
'CFPS-EFR_EX+IN': 18,
'TIPS-EFR': 22,
'TIPS-EFR_EX+IN': 24,
'CFR_EX+IN': 10,
'BPS-EFR': 14,
'BEHAV-EFR': 26,
'avg': 30,
'BR': 31,
}
def alg_sort_key(tag):
return _alg_order_map[tag]
def alg_sort_keys(tags):
return pd.Index([alg_sort_key(tag) for tag in tags], name=tags.name)
def with_sorted_algs(df):
df = df.sort_index(axis=0, key=alg_sort_keys)
df.sort_index(axis=1, key=alg_sort_keys, inplace=True)
return df
_game_tags = [
'leduc',
'kuhn_3p',
'kuhn_4p',
'goofspiel',
'goofspiel_ascending',
'random_goofspiel',
'goofspiel_3p',
'goofspiel_ascending_3p',
'sheriff',
'tiny_bridge',
'tiny_hanabi',
]
# Game labels
_game_label_map = {
'sheriff': r'Sheriff',
'tiny_bridge': r'tiny bridge',
'kuhn_3p': r'Kuhn poker',
'kuhn_4p': r'Kuhn poker',
'leduc': r"Leduc hold'em",
'random_goofspiel': r"goofspiel",
'tiny_hanabi': r"tiny Hanabi"
}
def game_label(tag, t=None):
if tag in _game_label_map:
name = _game_label_map[tag]
elif tag[:len('goofspiel')] == 'goofspiel':
name = 'goofspiel'
else:
name = tag
params_string = ''
if tag in xp.NUM_PLAYERS_MAP:
params_string = r'N=\num{{{}}}'.format(xp.NUM_PLAYERS_MAP[tag])
if t is not None:
params_string = r'{},T=\num{{{}}}'.format(params_string, t)
if tag in xp.EXTRA_GAME_PARAMS_MAP:
return '{}$({})$'.format(
name,
','.join(xp.EXTRA_GAME_PARAMS_MAP[tag] + (params_string,)))
if params_string != '':
return '{}$({})$'.format(name, params_string)
else:
return name
# Game order and sorting
_game_order_map = {tag: i for i, tag in enumerate(_game_tags + ['avg'])}
def game_sort_key(tag):
return _game_order_map[tag]
def game_sort_keys(tags):
return pd.Index([game_sort_key(tag) for tag in tags], name=tags.name)
def with_sorted_alg_game(df):
df = df.sort_index(axis=0, key=alg_sort_keys)
df.sort_index(axis=1, key=game_sort_keys, inplace=True)
return df
# Utility adjustment
def adjust_game_utility(game_tag, value):
if game_tag == 'leduc':
return value / 13.
if game_tag == 'kuhn_3p':
return (value - 1) / 3.
if game_tag == 'tiny_bridge':
return (value + 2) / 38.
if game_tag == 'goofspiel' or game_tag == 'random_goofspiel':
return value
    else:
        # ValueError is more appropriate than BaseException for a bad argument
        raise ValueError(f"No adjustment registered for {game_tag}")
def adjust_utilities(series_by_game):
return pd.Series({
game_tag: adjust_game_utility(game_tag, series_by_game.at[game_tag])
for game_tag in series_by_game.index
})
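# Illustrative values for the normalisation above (numbers are made up):
#   adjust_game_utility('leduc', 6.5)        == 0.5  # Leduc payoffs scaled by 13
#   adjust_game_utility('kuhn_3p', 2.5)      == 0.5  # shifted by -1, scaled by 3
#   adjust_game_utility('tiny_bridge', 17.0) == 0.5  # shifted by +2, scaled by 38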
# Data manipulation
def load_df(file_name='../results/efr_data.npy', **kwargs):
    # use the file_name argument (the original hard-coded the default path here)
    return pd.DataFrame.from_records(
        np.load(file_name, allow_pickle=True, **kwargs))
def without_br_row(df):
return df.query('row_alg != "BR"')
def br_row(df):
return df.query('row_alg == "BR"')
def with_avg_row(df):
row_avgs = df.mean(axis=1)
row_avgs.name = 'avg'
return pd.concat([df, row_avgs], axis=1)
def with_avg_row_col(df, exclude_br=True):
if exclude_br and 'BR' in df.index:
col_avgs = without_br_row(df).mean(axis=0)
else:
col_avgs = df.mean(axis=0)
mean_of_means = col_avgs.mean()
col_avgs = col_avgs.to_frame('avg').transpose()
row_avgs = df.mean(axis=1)
row_avgs = pd.concat([row_avgs, pd.Series({'avg': mean_of_means})], axis=0)
row_avgs.name = 'avg'
df = pd.concat([df, col_avgs], axis=0)
return pd.concat([df, row_avgs], axis=1)
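def _avg_row_col_example():
    """ Tiny illustration (not in the original file) of with_avg_row_col: it
    appends an 'avg' column of row means and an 'avg' row of column means,
    with the grand mean in the bottom-right cell. """
    df = pd.DataFrame([[1.0, 3.0], [5.0, 7.0]],
                      index=pd.Index(['CFR', 'A-EFR'], name='row_alg'),
                      columns=pd.Index(['CFR', 'A-EFR'], name='col_alg'))
    return with_avg_row_col(df)  # 3x3 frame; ['avg', 'avg'] == 4.0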
def max_element(df):
return df.max().max().squeeze()
def min_element(df):
return df.min().min().squeeze()
def mean_element(df):
return df.mean().mean().squeeze()
def midpoint(df):
    # Note: this returns half of the range, (max - min) / 2, not (max + min) / 2.
    return (max_element(df) - min_element(df)) / 2.0
class Data():
@classmethod
def load(cls, file_name='../results/efr_data.npy', **kwargs):
return cls(load_df(file_name, **kwargs))
def __init__(self, data):
self.data = data
def games(self):
return sorted(self.data.game_tag.unique(), key=game_sort_key)
def det_table(self, game, mode, t):
assert mode == 'fixed' or mode == 'sim'
df = self.data.query(f'game_tag == "{game}" & mode == "{mode}" & t == {t}')
return df.pivot(index='row_alg', columns='col_alg', values='value')
def time_avg_det_table(self, game, mode, num_iterations):
assert mode == 'fixed' or mode == 'sim'
df = self.data.query(
f'game_tag == "{game}" & mode == "{mode}" & t < {num_iterations}')
return df.pivot(index='t', columns=['row_alg', 'col_alg'],
values='value').mean().unstack()
def fixed_table(self, game, t):
return self.det_table(game, 'fixed', t)
def time_avg_fixed_table(self, game, num_iterations):
return self.time_avg_det_table(game, 'fixed', num_iterations)
def sim_table(self, game, t):
return self.det_table(game, 'sim', t)
def time_avg_sim_table(self, game, num_iterations):
return self.time_avg_det_table(game, 'sim', num_iterations)
def all_seeds(self):
return self.data.seed.unique()
def shuffled_table(self, game, seed, t):
df = self.data.query(
f'game_tag == "{game}" & mode == "shuffled" & seed == "{seed}" & t == {t}'
)
return df.pivot(index='row_alg', columns='col_alg', values='value')
def each_shuffled_table(self, game, t):
for seed in self.all_seeds():
yield self.shuffled_table(game, seed, t)
def shuffled_table_by_seed(self, game, t):
df = self.data.query(
f'game_tag == "{game}" & mode == "shuffled" & t == {t}')
return df.pivot(index='seed',
columns=['row_alg', 'col_alg'],
values='value')
def avg_shuffled_table(self, game, t):
return self.shuffled_table_by_seed(game, t).mean().unstack()
def max_abs_diff(self, game, t):
df = self.shuffled_table_by_seed(game, t)
return df.max() - df.min()
def max_abs_diff_from_mean(self, game, t):
df = self.shuffled_table_by_seed(game, t)
df_mean = df.mean()
return np.maximum(df.max() - df_mean, df_mean - df.min())
def det_avg_table(self, mode, t):
assert mode == 'fixed' or mode == 'sim'
return self.data.query(f'mode == "{mode}" & t == {t}').pivot(
index='col_alg', columns=['game_tag',
'row_alg'], values='value').mean().unstack()
def time_avg_det_avg_table(self, mode, num_iterations):
assert mode == 'fixed' or mode == 'sim'
return self.data.query(f'mode == "{mode}" & t < {num_iterations}').pivot(
index=['t', 'col_alg'], columns=['game_tag', 'row_alg'],
values='value').mean().unstack()
def fixed_avg_table(self, t):
return self.det_avg_table('fixed', t)
def time_avg_fixed_avg_table(self, num_iterations):
return self.time_avg_det_avg_table('fixed', num_iterations)
def sim_avg_table(self, t):
return self.det_avg_table('sim', t)
def time_avg_sim_avg_table(self, num_iterations):
return self.time_avg_det_avg_table('sim', num_iterations)
def shuffled_avg_table(self, t):
return self.data.query(f'mode == "shuffled" & t == {t}').pivot(
index=['seed', 'game_tag'],
columns=['row_alg', 'col_alg'],
values='value').mean().unstack()
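# Typical use of the Data wrapper with the plotting helpers below
# (illustrative; the path, game tag and iteration count are assumptions):
#   data = Data.load('../results/efr_data.npy')
#   table = data.fixed_table('leduc', t=1000)   # row_alg x col_alg payoff table
#   heatmap(table)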
# Plotting
def add_lines_to_separate_avg_row_col(axes, separate_br=False):
xmin, xmax = axes.get_xbound()
ymin, ymax = axes.get_ybound()
axes.add_line(
mlines.Line2D([xmax - 1, xmax - 1], [ymin, ymax],
color='black',
linewidth=3))
axes.add_line(
mlines.Line2D([xmin, xmax], [ymax - 1, ymax - 1],
color='black',
linewidth=3))
if separate_br:
axes.add_line(
mlines.Line2D([xmin, xmax], [ymax - 2, ymax - 2],
color='black',
linewidth=3))
def br_percentage_heatmap(df, ax=None, fmt="0.2f", **kwargs):
df_with_avgs = with_avg_row_col(without_br_row(df))
df_col_mins = df_with_avgs.min(axis=0)
df_as_frac_of_br = (df_with_avgs - df_col_mins) / (
with_avg_row(br_row(df)).squeeze() - df_col_mins)
g = sns.heatmap(
with_sorted_algs(df_as_frac_of_br),
annot=True,
fmt=fmt,
xticklabels=[
alg_label(tag)
for tag in sorted(df_with_avgs.columns, key=alg_sort_key)
],
yticklabels=[
alg_label(tag) for tag in sorted(df_with_avgs.index, key=alg_sort_key)
],
annot_kws={'size': 'large'},
center=df_as_frac_of_br.at['avg', 'avg'],
vmin=0,
vmax=1,
ax=ax,
**kwargs)
g.axes.xaxis.set_ticks_position("top")
add_lines_to_separate_avg_row_col(g.axes)
return g
def heatmap(df, ax=None, **kwargs):
df_with_avgs = with_avg_row_col(df)
g = sns.heatmap(
with_sorted_algs(df_with_avgs),
annot=True,
xticklabels=[
alg_label(tag)
for tag in sorted(df_with_avgs.columns, key=alg_sort_key)
],
yticklabels=[
alg_label(tag) for tag in sorted(df_with_avgs.index, key=alg_sort_key)
],
annot_kws={'size': 'large'},
# center=df_with_avgs.at['avg', 'avg'],
ax=ax,
**kwargs)
g.axes.xaxis.set_ticks_position("top")
add_lines_to_separate_avg_row_col(g.axes, 'BR' in df.index)
return g
def br_percentage_heatmap_avg(df, ax=None, fmt="0.2f", **kwargs):
df_with_avgs = with_avg_row_col(without_br_row(df))
df_col_mins = df_with_avgs.min(axis=0)
df_as_frac_of_br = (df_with_avgs - df_col_mins) / (
with_avg_row(br_row(df)).squeeze() - df_col_mins)
g = sns.heatmap(
with_sorted_alg_game(df_as_frac_of_br),
annot=True,
fmt=fmt,
xticklabels=[
game_label(tag)
for tag in sorted(df_with_avgs.columns, key=game_sort_key)
],
yticklabels=[
alg_label(tag) for tag in sorted(df_with_avgs.index, key=alg_sort_key)
],
annot_kws={'size': 'large'},
center=df_as_frac_of_br.at['avg', 'avg'],
vmin=0,
vmax=1,
ax=ax,
**kwargs)
g.axes.xaxis.set_ticks_position("top")
add_lines_to_separate_avg_row_col(g.axes)
return g
def heatmap_avg(df, ax=None, **kwargs):
df_with_avgs = with_avg_row_col(df)
g = sns.heatmap(
with_sorted_alg_game(df_with_avgs),
annot=True,
xticklabels=[
game_label(tag)
for tag in sorted(df_with_avgs.columns, key=game_sort_key)
],
yticklabels=[
alg_label(tag) for tag in sorted(df_with_avgs.index, key=alg_sort_key)
],
annot_kws={'size': 'large'},
center=df_with_avgs.at['avg', 'avg'],
ax=ax,
**kwargs)
g.axes.xaxis.set_ticks_position("top")
add_lines_to_separate_avg_row_col(g.axes, 'BR' in df.index)
return g
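# Minimal usage sketch (illustrative; assumes the helpers defined earlier in
# this module, e.g. with_avg_row_col and the alg_label/game_label functions):
# data = Data.load()
# game = data.games()[0]
# heatmap(data.fixed_table(game, t=100))
# br_percentage_heatmap(data.sim_table(game, t=100))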
| 27.72381 | 82 | 0.630368 | 1,796 | 11,644 | 3.811247 | 0.123608 | 0.017531 | 0.029218 | 0.01607 | 0.579109 | 0.523594 | 0.481081 | 0.465303 | 0.433163 | 0.386706 | 0 | 0.008127 | 0.207403 | 11,644 | 419 | 83 | 27.789976 | 0.733557 | 0.014256 | 0 | 0.362229 | 0 | 0.003096 | 0.144912 | 0.018746 | 0 | 0 | 0 | 0 | 0.012384 | 1 | 0.145511 | false | 0 | 0.01548 | 0.077399 | 0.318885 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2abab7ecf28c074b6eba41c754e995473cdc658f | 938 | py | Python | test/server/test_cli.py | RaT0M/multiply-ui | ad7fffb15cc962604340b31b38d34bc470fa8448 | [
"MIT"
] | null | null | null | test/server/test_cli.py | RaT0M/multiply-ui | ad7fffb15cc962604340b31b38d34bc470fa8448 | [
"MIT"
] | 20 | 2019-05-21T10:33:36.000Z | 2019-12-11T08:13:29.000Z | test/server/test_cli.py | RaT0M/multiply-ui | ad7fffb15cc962604340b31b38d34bc470fa8448 | [
"MIT"
] | 1 | 2020-10-14T12:32:36.000Z | 2020-10-14T12:32:36.000Z | import unittest
import click.testing
from multiply_ui.server.cli import mui_server
class CliTest(unittest.TestCase):
@classmethod
def invoke_cli(cls, *args):
runner = click.testing.CliRunner()
return runner.invoke(mui_server, args, catch_exceptions=False)
def test_help_option(self):
result = self.invoke_cli('--help')
self.assertEqual(0, result.exit_code)
self.assertEqual(
(
'Usage: mui-server [OPTIONS]\n'
'\n'
' Starts a service which exposes a RESTful API to the Multiply UI.\n'
'\n'
'Options:\n'
' -p, --port INTEGER Set service port number. Defaults to 9090.\n'
' -a, --address TEXT Set service IP address. Defaults to "0.0.0.0".\n'
' --help Show this message and exit.\n'
),
result.stdout)
| 32.344828 | 88 | 0.555437 | 110 | 938 | 4.654545 | 0.545455 | 0.052734 | 0.011719 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014563 | 0.341151 | 938 | 28 | 89 | 33.5 | 0.813916 | 0 | 0 | 0.086957 | 0 | 0.043478 | 0.324094 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2abb9646a5986fb903415f118dc93e172fe2e55d | 3,531 | py | Python | ml/simple_linear_regression.py | ad-free/lab-ml | 5b52ae6201c426313210752709c57233b980b754 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | ml/simple_linear_regression.py | ad-free/lab-ml | 5b52ae6201c426313210752709c57233b980b754 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | ml/simple_linear_regression.py | ad-free/lab-ml | 5b52ae6201c426313210752709c57233b980b754 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from commons.utils import Download
class SimpleLinearRegression:
def __init__(self, is_download: bool = False, filename: str = 'FuelConsumption.csv'):
self.url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs' \
'/FuelConsumptionCo2.csv'
if is_download:
Download(url=self.url, filename=filename)
self.df = pd.read_csv(f'download/{filename}')
def data_exploration(self) -> None:
print('[+] Show data head')
print(self.df.head())
print('[+] Summarize the data')
print(self.df.describe())
def plot_features(self) -> None:
# cdf = self.df[["ENGINESIZE", "CYLINDERS", "FUELCONSUMPTION_COMB", "CO2EMISSIONS"]]
cdf = self.df[self.df.columns.values]
cdf.hist()
plt.show()
plt.close()
def plot_linear(self, dependent: str = None, independent: str = None) -> None:
plt.scatter(self.df[independent], self.df[dependent], color='blue')
plt.xlabel(independent)
plt.ylabel(dependent)
plt.show()
plt.close()
def simple_regression_model(self) -> None:
"""
Step 1:
- Creating train and test dataset
Step 2:
- Mean absolute error:
It is the mean of the absolute value of the errors.
This is the easiest of the metrics to understand since it’s just average error.
- Mean Squared Error (MSE):
Mean Squared Error (MSE) is the mean of the squared error.
It’s more popular than Mean absolute error because the focus is geared more towards large errors.
This is due to the squared term exponentially increasing larger errors in comparison to smaller ones.
- Root Mean Squared Error (RMSE).
- R-squared is not error, but is a popular metric for accuracy of your model.
It represents how close the data are to the fitted regression line.
The higher the R-squared, the better the model fits your data.
Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse).
"""
cdf = self.df[["ENGINESIZE", "CYLINDERS", "FUELCONSUMPTION_COMB", "CO2EMISSIONS"]]
msk = np.random.rand(len(self.df)) < 0.8
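# Random ~80/20 split: rows where the mask is True go to training, the rest to test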
train = cdf[msk]
test = cdf[~msk]
# Training Model with Linear Regression
regr = LinearRegression()
train_x = np.asanyarray(train[['ENGINESIZE']])
train_y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit(train_x, train_y)
# The coefficients
print('Coefficient:', regr.coef_)
print('Intercept:', regr.intercept_)
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
plt.plot(train_x, regr.coef_[0][0] * train_x + regr.intercept_[0], '-r')
plt.xlabel('Engine Size')
plt.ylabel('Emissions')
plt.show()
plt.close()
# Step 2: Evaluation
test_x = np.asanyarray(test[['ENGINESIZE']])
test_y = np.asanyarray(test[['CO2EMISSIONS']])
test_y_ = regr.predict(test_x)
print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_ - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_ - test_y) ** 2))
print("R2-score: %.2f" % r2_score(test_y_, test_y))
| 38.802198 | 119 | 0.635231 | 459 | 3,531 | 4.797386 | 0.374728 | 0.027248 | 0.012262 | 0.020436 | 0.082652 | 0.053588 | 0.053588 | 0.053588 | 0 | 0 | 0 | 0.010968 | 0.251204 | 3,531 | 90 | 120 | 39.233333 | 0.821861 | 0.28094 | 0 | 0.115385 | 0 | 0.019231 | 0.178333 | 0.02125 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096154 | false | 0 | 0.115385 | 0 | 0.230769 | 0.173077 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2abd82a418ea48f6e6c8c5e730769a7fbd3fa2a2 | 5,242 | py | Python | src/external/coremltools_wrap/coremltools/coremltools/converters/tensorflow/test/test_tf_keras_layers.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | [
"BSD-3-Clause"
] | 1 | 2021-04-23T10:51:03.000Z | 2021-04-23T10:51:03.000Z | src/external/coremltools_wrap/coremltools/coremltools/converters/tensorflow/test/test_tf_keras_layers.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | [
"BSD-3-Clause"
] | 2 | 2019-03-28T00:17:14.000Z | 2019-03-28T00:17:47.000Z | src/external/coremltools_wrap/coremltools/coremltools/converters/tensorflow/test/test_tf_keras_layers.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | [
"BSD-3-Clause"
] | null | null | null | import unittest
import sys, os, shutil, tempfile
import tensorflow as tf
import numpy as np
import coremltools
from tensorflow.python.tools.freeze_graph import freeze_graph
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, \
SeparableConv2D
from test_utils import generate_data, tf_transpose
from test_base import TFNetworkTest
DEBUG = False
class TFKerasNetworkTest(TFNetworkTest):
@classmethod
def setUpClass(cls):
"""Set up the unit test by loading common utilities.
"""
K.set_learning_phase(0)
def _test_keras_model(
self, model, data_mode='random', delta=1e-2, use_cpu_only=True, has_variables=True):
"""Saves out the backend TF graph from the Keras model and tests it
"""
model_dir = tempfile.mkdtemp()
graph_def_file = os.path.join(model_dir, 'tf_graph.pb')
checkpoint_file = os.path.join(model_dir, 'tf_model.ckpt')
frozen_model_file = os.path.join(model_dir, 'tf_frozen.pb')
coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')
input_shapes = {inp.op.name: inp.shape.as_list() for inp in model.inputs}
for name, shape in input_shapes.items():
input_shapes[name] = [dim if dim is not None else 1 for dim in shape]
output_node_names = [output.op.name for output in model.outputs]
tf_graph = K.get_session().graph
tf.reset_default_graph()
if has_variables:
with tf_graph.as_default() as g:
saver = tf.train.Saver()
# TODO - if Keras backend has_variable is False, we're not making variables constant
with tf.Session(graph=tf_graph) as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {}
for name, shape in input_shapes.items():
tensor_name = tf_graph.get_operation_by_name(name).outputs[0].name
feed_dict[tensor_name] = generate_data(shape, data_mode)
# run the result
fetches = [
tf_graph.get_operation_by_name(name).outputs[0] for name in output_node_names
]
result = sess.run(fetches, feed_dict=feed_dict)
# save graph definition somewhere
tf.train.write_graph(sess.graph, model_dir, graph_def_file, as_text=False)
# freeze_graph() has been raising error with tf.keras models since no
# later than TensorFlow 1.6, so we're not using freeze_graph() here.
# See: https://github.com/tensorflow/models/issues/5387
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf_graph.as_graph_def(), # The graph_def is used to retrieve the nodes
output_node_names # The output node names are used to select the useful nodes
)
with tf.gfile.GFile(frozen_model_file, "wb") as f:
f.write(output_graph_def.SerializeToString())
K.clear_session()
# convert to CoreML
mlmodel = coremltools.converters.tensorflow.convert(
frozen_model_file,
inputs=input_shapes,
outputs=output_node_names,
use_cpu_only=use_cpu_only)
if DEBUG:
print('\n mlmodel description: \n')
from coremltools.models.neural_network.printer import print_network_spec
print_network_spec(mlmodel.get_spec(), style='coding')
mlmodel.save(coreml_model_file)
print('\n mlmodel saved at %s' % (coreml_model_file))
# Transpose input data as CoreML requires
coreml_inputs = {
name: tf_transpose(feed_dict[self._get_tf_tensor_name(tf_graph, name)])
for name in input_shapes
}
# Run predict in CoreML
coreml_output = mlmodel.predict(coreml_inputs, useCPUOnly=use_cpu_only)
for idx, out_name in enumerate(output_node_names):
tf_out = result[idx]
if len(tf_out.shape) == 0:
tf_out = np.array([tf_out])
tp = tf_out.flatten()
coreml_out = coreml_output[out_name]
cp = coreml_out.flatten()
self.assertTrue(tf_out.shape == coreml_out.shape)
for i in range(len(tp)):
max_den = max(1.0, tp[i], cp[i])
self.assertAlmostEqual(tp[i] / max_den, cp[i] / max_den, delta=delta)
# Cleanup files - models on disk no longer useful
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
class KerasBasicNumericCorrectnessTest(TFKerasNetworkTest):
def test_dense_softmax(self):
np.random.seed(1987)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation='softmax'))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test it
self._test_keras_model(model)
if __name__ == '__main__':
unittest.main()
| 40.015267 | 118 | 0.644029 | 692 | 5,242 | 4.650289 | 0.312139 | 0.021753 | 0.027968 | 0.017402 | 0.085768 | 0.073959 | 0.073959 | 0.022996 | 0.022996 | 0 | 0 | 0.007051 | 0.269554 | 5,242 | 130 | 119 | 40.323077 | 0.833377 | 0.147844 | 0 | 0.022222 | 0 | 0 | 0.029948 | 0 | 0 | 0 | 0 | 0.007692 | 0.022222 | 1 | 0.033333 | false | 0 | 0.133333 | 0 | 0.188889 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2abdd985da5842c6cc3d2828a3f75d66fde3d35c | 9,343 | py | Python | dash/odorless/app/main.py | rgerkin/pyrfume | 54e79dc054557b294f8905b379efbcb94e73573e | [
"MIT"
] | null | null | null | dash/odorless/app/main.py | rgerkin/pyrfume | 54e79dc054557b294f8905b379efbcb94e73573e | [
"MIT"
] | null | null | null | dash/odorless/app/main.py | rgerkin/pyrfume | 54e79dc054557b294f8905b379efbcb94e73573e | [
"MIT"
] | null | null | null | from collections import OrderedDict
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from ipywidgets import Text, Image, Layout, GridBox
import plotly.graph_objs as go
import flask
import numpy as np
import pandas as pd
import pickle
import pyrfume
from pyrfume.odorants import smiles_to_image, all_odorants, all_sources
##### Initialize app #####
external_stylesheets = ['https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css']
app = flask.Flask(__name__)
dapp = dash.Dash(__name__,
server=app,
url_base_pathname='/',
external_stylesheets=external_stylesheets)
##### Load data #####
pyrfume.set_data_path('data')
gdb_umap = pyrfume.load_data('gdb_umap.pkl', remote=False)
pf_umap = pyrfume.load_data('pf_umap.pkl', remote=False)
hover_on = 1
def plot(big_umap, known_umap, skip=10):
big_umap = big_umap.iloc[::skip]
known_umap = known_umap.iloc[::skip]
# The GDB scatter plot
big_scatter = go.Scatter(
x=big_umap.loc[:, 0],
y=big_umap.loc[:, 1],
name='Possible Molecules',
mode="markers",
hoverinfo="text",
opacity=0.5,
marker={
"size": 5,
"line": {"width": 0.5, "color": "white"},
"color": big_umap.loc[:, 'p'],
"colorscale": 'magma',
"colorbar": {'thickness': 20, 'title': 'p(Odorous)'},
},
)
# The known odorants scatter plot
known_scatter = go.Scattergl(
x=known_umap.loc[:, 0],
y=known_umap.loc[:, 1],
name='Known Odorous',
mode="markers",
hoverinfo="text",
opacity=1,
marker={
"size": 5,
"line": {"width": 0.5, "color": "white"},
"color": "blue",
},
)
# The axes, etc.
layout = go.Layout(
xaxis={"type": "linear", "title": "", "showline": False, "showticklabels": False},
yaxis={"type": "linear", "title": "", "showline": False, "showticklabels": False},
margin={"l": 40, "b": 40, "t": 10, "r": 10},
legend={"x": 0, "y": 1, 'font':{'size':15, 'color': 'white'}},
hovermode="closest",
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(10,10,10,1)",
width=1000,
height=1000,
xaxis_showgrid=False,
yaxis_showgrid=False,
xaxis_zeroline=False,
yaxis_zeroline=False
)
fig = go.FigureWidget(data=[big_scatter, known_scatter], layout=layout)
fig.layout.hovermode = 'closest'
# The 2D drawing of the molecule
first_smiles = "CCCCO"
image_widget = Image(
value=smiles_to_image(first_smiles),
layout=Layout(height="200px", width="200px", left='50px', top='-250px')
)
text_widget = Text(
value=first_smiles,
placeholder=first_smiles,
description='SMILES:',
disabled=False,
layout=Layout(width='400px', left='25px', top='-285px')
)
def hover_fn(trace, points, state):
#print(points)
try:
ind = points.point_inds[0]
if trace.name == 'Possible Molecules':
use = big_umap
else:
use = known_umap
smiles = use.index[ind]
image_widget.value = smiles_to_image(smiles, size=200)
text_widget.value = smiles
except IndexError:
pass
except Exception as e:
print(e)
def click_fn(trace, points, state):
global hover_on
if hover_on:
fig.data[0].on_hover(None)
hover_on = 0
else:
fig.data[0].on_hover(hover_fn)
hover_on = 1
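# Register the interaction callbacks on the GDB trace: hovering updates the
# molecule preview, and clicking toggles hover updates on/off, pinning the
# currently shown molecule.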
fig.data[0].on_hover(hover_fn)
fig.data[0].on_click(click_fn)
canvas = GridBox([fig, image_widget, text_widget], layout={'gridgap': '0'})
return canvas
plot(gdb_umap, pf_umap, skip=10)
details = all_odorants()
sources = all_sources()
spaces = OrderedDict({'snitz': 'Snitz Map',
'haddad': 'Haddad Map',
'westeros': 'Westeros Map'})
algorithm = 'umap'
embeddings = {}
for space in spaces:
key = '%s_%s' % (space, algorithm)
with open(pyrfume.DATA_DIR / 'odorants' / ('%s.pkl' % key), 'rb') as f:
embeddings[space] = pickle.load(f)
# Remove molecules for which there are no details
good_cids = details.index.intersection(embeddings[space].index)
embeddings[space] = embeddings[space].loc[good_cids, :]
# Assert that all embeddings have the same number of molecules
assert len(set([embeddings[space].shape[0] for space in spaces])) == 1
def make_figure(embedding, title):
return {
'data': [
go.Scattergl(**{
'x': embedding.loc[:, 'X'].values,
'y': embedding.loc[:, 'Y'].values,
'mode': 'markers',
'marker': {'size': 5,
'color': embedding.loc[:, 'cluster'].values,
'opacity': 0.5,
'cmax': 9,
'cmin': 0,
'colorscale': 'Rainbow'}
}),
],
'layout': {
'title': title,
'clickmode': 'event',
'xaxis': {'type': 'linear',
'title': '',
'zeroline': False,
'showgrid': False,
'showline': False,
'showticklabels': False},
'yaxis': {'type': 'linear',
'title': '',
'zeroline': False,
'showgrid': False,
'showline': False,
'showticklabels': False},
'margin': {'l': 40, 'b': 40, 't': 10, 'r': 10},
'legend': {'x': 0, 'y': 1},
'hovermode': 'closest',
'paper_bgcolor': 'rgba(0,0,0,0)',
'plot_bgcolor': 'rgba(0,0,0,0)',
'width': 700,
'height': 500,
'margin': {'t': 50},
},
}
###### App layout ######
dapp.layout = html.Div(className='container-fluid', children=[
html.Div(className='row', children=[
html.Div(className='col', children=[
dcc.Graph(
id=space,
clear_on_unhover=True,
figure=make_figure(embeddings[space], title))
])
for space, title in spaces.items()]),
html.Div(className='row', children=[
html.Div(className='col', children=[
html.Table([
html.Tr([html.Td(['CID:']),
html.Td([html.A(id='cid',
href='#')])]),
html.Tr([html.Td(['MW:']),
html.Td(id='mw')]),
html.Tr([html.Td(['Name:']),
html.Td(id='name')]),
html.Tr([html.Td(['SMILES:']),
html.Td(id='smiles')]),
html.Tr([html.Td(['IUPAC:']),
html.Td(id='iupac')]),
html.Tr([html.Td(['Sources:']),
html.Td(id='sources')]),
], style={'vertical-align': 'middle',
'font-size': '160%'})]),
html.Div(className='col', children=[
html.Img(id='molecule-2d', src=''),
]),
]),
html.Div(id='hidden-div', style={'display': 'none'})
])
# multiple callback error suppressed [Div has no .keys() atrribute]
# dapp.config['suppress_callback_exceptions']=True
def get_index(*hoverData):
index = None
for hoverDatum in hoverData:
if hoverDatum:
index = hoverDatum['points'][0]['pointIndex']
return index
##### App callbacks #####
@dapp.callback(
[Output('cid', 'children'),
Output('cid', 'href'),
Output('mw', 'children'),
Output('name', 'children'),
Output('smiles', 'children'),
Output('iupac', 'children'),
Output('sources', 'children'),
Output('molecule-2d', 'src'),
],
[Input(space, 'hoverData') for space in spaces],
)
def _display_hover(*hoverData):
index = get_index(*hoverData)
# display_hover handles index=None by returning empty values for every output
return display_hover(index)
def display_hover(index):
columns = ['CID', 'MW', 'Name', 'SMILES', 'IUPACName']
if index is None:
return ['']*8
info = details.reset_index().iloc[index][columns]
cid, mw, name, smiles, iupacname = info.values
cid_url = 'https://pubchem.ncbi.nlm.nih.gov/compound/%d' % cid
source = sources.reset_index().loc[index]
source = ', '.join(source[source == 1].index)
hover_image_src = display_hover_image(index)
return [cid, cid_url, mw, name, smiles, iupacname, source, hover_image_src]
def display_hover_image(index):
if index is None:
return ''
smiles = details.iloc[index]['SMILES']
image = smiles_to_image(smiles, png=True, b64=True, crop=True, size=500)
src = 'data:image/png;base64, %s' % image
return src
##### Run app #####
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, port=80)
| 31.996575 | 99 | 0.524671 | 1,034 | 9,343 | 4.618956 | 0.281431 | 0.005025 | 0.005025 | 0.015075 | 0.180067 | 0.155988 | 0.147822 | 0.120394 | 0.108459 | 0.108459 | 0 | 0.022107 | 0.31735 | 9,343 | 291 | 100 | 32.106529 | 0.726717 | 0.042492 | 0 | 0.161826 | 0 | 0.004149 | 0.141844 | 0.002477 | 0 | 0 | 0 | 0 | 0.004149 | 1 | 0.033195 | false | 0.004149 | 0.053942 | 0.004149 | 0.120332 | 0.008299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2abeabf60b138fc6a7e803a48865fb94efe621d5 | 1,760 | py | Python | src/sarsa.py | Mengard/ML | 24145fff5073541bc132c84e5b8976ff06d8e978 | [
"MIT"
] | null | null | null | src/sarsa.py | Mengard/ML | 24145fff5073541bc132c84e5b8976ff06d8e978 | [
"MIT"
] | null | null | null | src/sarsa.py | Mengard/ML | 24145fff5073541bc132c84e5b8976ff06d8e978 | [
"MIT"
] | null | null | null | import random
import numpy as np
class Sarsa:
def __init__(self, logic, epsilon, alpha, gamma, actions):
self.Q = {} # state-action dictionary
self.epsilon = epsilon # used for epsilon-greedy choice
self.alpha = alpha # used for diminished returns, "the sooner the better"
self.gamma = gamma # learning rate, importance given to new data
self.actions = actions # needs to know which actions are possible for greedy action choice
def Qget(self, state, action):
return self.Q.get((state, action), 0.0)
def learn(self, old_state, old_action, new_state, new_action, reward):
self.epsilon = self.epsilon + (1 - self.epsilon) * 0.0001 # anneal epsilon toward 1 (less exploration over time)
# the closer gamma is to 1, the more old memories will be overwritten by new data
# the closer gamma is to 0, the more old memories will stay the same
forget = self.gamma * self.Qget(old_state, old_action) # how much will be forgotten
learn = self.gamma * (self.Qget(new_state, new_action) + reward) # how much will be learned
self.Q[(old_state, old_action)] = self.Qget(old_state, old_action) - forget + self.alpha * learn # update value
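# Note: this update is a non-standard variant. The textbook SARSA rule is
#   Q(s, a) <- Q(s, a) + alpha * (reward + gamma * Q(s', a') - Q(s, a))
# whereas here the old value is decayed by gamma and
# alpha * gamma * (Q(s', a') + reward) is blended in.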
def choose_action(self, state):
# epsilon-greedy: take a random action when random() > epsilon, otherwise pick the best known action
# the closer epsilon is to 1, the more often the algorithm chooses the best action
# the closer epsilon is to 0, the more often the algorithm chooses a random action
if random.random() > self.epsilon: # random (exploratory) action
action = random.choice(self.actions)
else: # looking for the best action
Qs = [self.Qget(state, action) for action in self.actions]
action = self.actions[random.choice(np.where(Qs == np.max(Qs))[0])] # random choice between the best actions if ties occur
return action
| 55 | 125 | 0.727841 | 279 | 1,760 | 4.530466 | 0.308244 | 0.043513 | 0.03481 | 0.053797 | 0.206487 | 0.083861 | 0 | 0 | 0 | 0 | 0 | 0.009015 | 0.180682 | 1,760 | 32 | 126 | 55 | 0.867545 | 0.452273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.086957 | 0.043478 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
2ac2d7174b4b8c87cbe334ff237265f4c1cf1f4d | 3,867 | py | Python | docker/app/tests/test_preprocess.py | Karthick47v2/mcq-generator | e35a877ed5d0947cd37c5c5114fb78fdf68b1a5f | [
"MIT"
] | null | null | null | docker/app/tests/test_preprocess.py | Karthick47v2/mcq-generator | e35a877ed5d0947cd37c5c5114fb78fdf68b1a5f | [
"MIT"
] | null | null | null | docker/app/tests/test_preprocess.py | Karthick47v2/mcq-generator | e35a877ed5d0947cd37c5c5114fb78fdf68b1a5f | [
"MIT"
] | null | null | null | """unit tests for preprocess.py"""
import pytest
from src import preprocess
class TestPreprocessBulkText:
"""class holding test cases for preprocess_bulk_text function"""
# pylint: disable=no-self-use
@pytest.mark.parametrize('text, result', [
("""
Natural language processing (NLP) is the ability of a computer
program to understand human language as it is spoken and written -- referred
to as natural language.
""", "Natural language processing (NLP) is the ability of a computer program to understand"
+ " human language as it is spoken and written -- referred to as natural language."), (
"ValueError\
:\
attempted \
relative \
import \
beyond\
top-level \
package",
"ValueError : attempted relative import beyond top-level package")
])
def test_whitespace(self, text, result):
"""test whether unnecessary whitespaces got covered
Args:
text (str): test input
result (str): test result
"""
assert preprocess.preprocess_bulk_text(
text) == result, "Check whitespaces"
@pytest.mark.parametrize('text, result', [(
"⁍ ‣ValueError!!!@: attempted relative import" +
" beyond~top-level package_", " ValueError : " +
"attempted relative import beyond top-level package "
)])
def test_punctuation_marks(self, text, result):
"""test whether unnecessary punctuation marks are avoided
Args:
text (str): test input
result (str): test result
"""
assert preprocess.preprocess_bulk_text(
text) == result, "Unwanted punctuation marks"
class TestSplitText:
"""class holding test cases for split_text function"""
# pylint: disable=no-self-use
@pytest.mark.parametrize('text, result', [
("This test will split correctly at period.",
["This test will split correctly at period."]),
("This will split before period.",
["This will split before period."])
])
def test_split_correctly_at_range(self, text, result):
"""test whether correctly split at period when its the
threshold
Args:
text (str): test input
result (list[str]): test result
"""
assert preprocess.split_text(
text, 41) == result, "Not splitted correctly."
assert isinstance(preprocess.split_text(text, 42),
list), "function must return a list"
@pytest.mark.parametrize('text, result', [
("This is first sentence. Assume this is a long text.",
["This is first sentence. Assume this is a long text."])
])
def test_split_tolerance(self, text, result):
"""test whether correctly split at period when threshold
is passed
we put threshold as 25.. That passed first sentence but
it will only split when it encounter a period after threshold passed.
so whole test corpus will be inside 0th index of splitted text.
Args:
text (str): test input
result (list[str]): test result
"""
assert preprocess.split_text(
text, 25)[0] == result[0], "Need to split after period."
@pytest.mark.parametrize('query, result', [
([('bat|NOUN', 0.0), ('Karthick|PRONOUN', 0.0)], ['Bat', 'Karthick']),
([('natural_language_processing|NOUN', 0.0)], ['Natural language processing'])
])
def test_change_format(query, result):
"""change output from sense2vec to fair readable form
Args:
query (list[tuple[str, float]]): test input
result (list[str]): test result
"""
assert preprocess.change_format(
query) == result, "Failed to process s2v format."
| 35.154545 | 100 | 0.609516 | 444 | 3,867 | 5.256757 | 0.295045 | 0.042845 | 0.044987 | 0.040703 | 0.627249 | 0.593402 | 0.529563 | 0.529563 | 0.498715 | 0.478149 | 0 | 0.006902 | 0.288079 | 3,867 | 109 | 101 | 35.477064 | 0.840174 | 0.268425 | 0 | 0.192982 | 0 | 0 | 0.431486 | 0.012317 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0.087719 | false | 0 | 0.105263 | 0 | 0.22807 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ac713a7f99994b6c7bcda9df66734e9fa7f27db | 4,550 | py | Python | yonosumi_utils/voice.py | Saroniii/yonosumi_official_bot | ef09ff8e9c089c0df8d191fe5db665f0f7322fd3 | [
"MIT"
] | 5 | 2020-09-23T01:06:00.000Z | 2020-11-24T04:39:58.000Z | yonosumi_utils/voice.py | Saroniii/yonosumi_official_bot | ef09ff8e9c089c0df8d191fe5db665f0f7322fd3 | [
"MIT"
] | 13 | 2020-10-10T16:00:16.000Z | 2020-11-26T02:02:57.000Z | yonosumi_utils/voice.py | YonosumiProject/yonosumi_official_bot | f8e3d2c0f7c0320cdb9247917d6d21f208ec7a77 | [
"MIT"
] | 2 | 2021-04-19T21:46:00.000Z | 2021-08-16T07:23:11.000Z | import discord
from discord.ext import commands
from yonosumi_utils import my_channel
from typing import Union, Callable, List
from logging import Logger
import yonosumi_utils
class voice:
def is_active(self, channel: discord.VoiceChannel, count_bots=True) -> bool:
"""
Check whether anyone is in the voice channel.
"""
# Check for None first so channel.members is never accessed on a missing channel.
if channel is None:
return True
if count_bots:
member_count: int = len(channel.members)
else:
member_count: int = len(
[i for i in channel.members if not i.bot])
return member_count > 0
def is_muted_text_channel(self, channel: discord.TextChannel) -> bool:
"""
Check whether the given channel is a listen-only (muted) text channel.
"""
topic_split: list = my_channel.get_topic(channel, split=True)
# The Japanese topic marker below is also produced by generate_auto_voice_topic, so it must match exactly.
return topic_split[0] == "これは自動生成されたテキストチャンネルです。"
def is_voice_control_panel(self, message: discord.Message, bot: commands.Bot) -> bool:
"""
Check whether the given message is the control panel of an auto-generated voice channel.
"""
try:
return message.embeds[0].description == self.control_panel_description() and message.author == bot.user
except Exception:
return False
def is_generate_voice_channel(self, channel: discord.VoiceChannel) -> bool:
"""
Check whether the given channel is the channel that generates voice channels.
"""
generate_channel_id = 776403002356924436
return channel.id == generate_channel_id
def is_auto_voice_channel(self, channel: discord.VoiceChannel) -> bool:
"""
Check whether the given channel is an auto-generated voice channel.
"""
voice_category_id = 770140316078309416
return voice_category_id == channel.category.id and not self.is_generate_voice_channel(channel) and channel != channel.guild.afk_channel
def generate_auto_voice_topic(self, voice: discord.VoiceChannel, member: discord.Member) -> str:
"""
Generate the topic string for an auto-generated text channel.
"""
return f"これは自動生成されたテキストチャンネルです。\n{voice.id}\n{member.id}"
async def clean_null_auto_voice_channels(self, category: discord.CategoryChannel, ignore_vcs: list, logger: Logger) -> List[str]:
"""
Detect auto-generated voice channels with nobody in them and delete them.
"""
id_list = []
channel: discord.VoiceChannel
for channel in category.channels:
if type(channel) == discord.VoiceChannel:
if not self.is_active(channel) and self.is_auto_voice_channel(channel) and not channel.id in ignore_vcs:
id_list.append(str(channel.id))
await channel.delete(reason="誰もいないため")
logger.info(f"自動生成されたVC({channel.name})を削除しました")
return id_list
async def clean_null_auto_text_channels(self, category: discord.CategoryChannel, channels: List[str], logger: Logger):
"""
Detect auto-generated text channels that are no longer in use and delete them.
Note: `channels` is expected to be the list of deleted voice-channel IDs returned by clean_null_auto_voice_channels.
"""
for channel in category.channels:
if type(channel) == discord.TextChannel:
topic = my_channel.get_topic(channel, split=True)
if topic is None:
continue
elif topic[1] in channels:
await channel.delete(reason="誰もいないため")
logger.info(f"自動生成されたTC({channel.name})を削除しました")
async def get_auto_voice_owner(self, channel: discord.TextChannel) -> Union[discord.Member, None]:
"""
Return the member object of the owner of an auto-generated channel.
Returns None if the owner could not be retrieved.
"""
id = yonosumi_utils.get_topic(channel, split=True)[2]
try:
return await channel.guild.fetch_member(int(id))
except:
return None
def is_hide(self, channel: discord.VoiceChannel) -> bool:
guild: discord.Guild = channel.guild
everyone_perms = dict(channel.overwrites_for(guild.default_role))
if everyone_perms['view_channel'] == True:
return False
return True
@staticmethod
def control_panel_description() -> str:
"""
Shortcut function that returns the description string used by the control panel embed.
"""
return "ここでは、該当するリアクションを押すことで様々な設定を行うことが出来ます。\n\n✏:チャンネル名の変更\n\n🔒:利用可能人数の制限"
| 33.703704 | 154 | 0.617143 | 464 | 4,550 | 5.887931 | 0.267241 | 0.04612 | 0.039531 | 0.036603 | 0.248536 | 0.172035 | 0.161786 | 0.095168 | 0.064422 | 0 | 0 | 0.012773 | 0.294505 | 4,550 | 134 | 155 | 33.955224 | 0.837383 | 0.048791 | 0 | 0.341772 | 0 | 0 | 0.057889 | 0.05123 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101266 | false | 0 | 0.075949 | 0 | 0.417722 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ac7ff8205294739a43ab1f546fe9ab5493fc802 | 10,993 | py | Python | src/learners/q_learner.py | benellis3/REFIL | fe3d6ea5a6eb307128cf8a47ddc6e59cb126a52a | [
"MIT"
] | 25 | 2020-07-20T01:44:12.000Z | 2021-04-26T07:38:01.000Z | src/learners/q_learner.py | benellis3/REFIL | fe3d6ea5a6eb307128cf8a47ddc6e59cb126a52a | [
"MIT"
] | 1 | 2021-02-05T07:18:48.000Z | 2021-02-05T07:18:48.000Z | src/learners/q_learner.py | benellis3/REFIL | fe3d6ea5a6eb307128cf8a47ddc6e59cb126a52a | [
"MIT"
] | 7 | 2020-08-05T21:12:11.000Z | 2021-04-12T14:23:43.000Z | import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.vdn import VDNMixer
from modules.mixers.qmix import QMixer
from modules.mixers.flex_qmix import FlexQMixer, LinearFlexQMixer
import torch as th
from torch.optim import RMSprop
class QLearner:
def __init__(self, mac, scheme, logger, args):
self.args = args
self.mac = mac
self.logger = logger
self.params = list(mac.parameters())
self.last_target_update_episode = 0
self.mixer = None
if args.mixer is not None:
if args.mixer == "vdn":
self.mixer = VDNMixer()
elif args.mixer == "qmix":
self.mixer = QMixer(args)
elif args.mixer == "flex_qmix":
assert args.entity_scheme, "FlexQMixer only available with entity scheme"
self.mixer = FlexQMixer(args)
elif args.mixer == "lin_flex_qmix":
assert args.entity_scheme, "FlexQMixer only available with entity scheme"
self.mixer = LinearFlexQMixer(args)
else:
raise ValueError("Mixer {} not recognised.".format(args.mixer))
self.params += list(self.mixer.parameters())
self.target_mixer = copy.deepcopy(self.mixer)
self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps,
weight_decay=args.weight_decay)
# a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
self.target_mac = copy.deepcopy(mac)
self.log_stats_t = -self.args.learner_log_interval - 1
def _get_mixer_ins(self, batch, repeat_batch=1):
if not self.args.entity_scheme:
return (batch["state"][:, :-1].repeat(repeat_batch, 1, 1),
batch["state"][:, 1:])
else:
entities = []
bs, max_t, ne, ed = batch["entities"].shape
entities.append(batch["entities"])
if self.args.entity_last_action:
last_actions = th.zeros(bs, max_t, ne, self.args.n_actions,
device=batch.device,
dtype=batch["entities"].dtype)
last_actions[:, 1:, :self.args.n_agents] = batch["actions_onehot"][:, :-1]
entities.append(last_actions)
entities = th.cat(entities, dim=3)
return ((entities[:, :-1].repeat(repeat_batch, 1, 1, 1),
batch["entity_mask"][:, :-1].repeat(repeat_batch, 1, 1)),
(entities[:, 1:],
batch["entity_mask"][:, 1:]))
def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
# Get the relevant quantities
rewards = batch["reward"][:, :-1]
actions = batch["actions"][:, :-1]
terminated = batch["terminated"][:, :-1].float()
mask = batch["filled"][:, :-1].float()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
avail_actions = batch["avail_actions"]
will_log = (t_env - self.log_stats_t >= self.args.learner_log_interval)
# # Calculate estimated Q-Values
# mac_out = []
self.mac.init_hidden(batch.batch_size)
# enable things like dropout on mac and mixer, but not target_mac and target_mixer
self.mac.train()
self.mixer.train()
self.target_mac.eval()
self.target_mixer.eval()
if 'imagine' in self.args.agent:
all_mac_out, groups = self.mac.forward(batch, t=None, imagine=True,
use_gt_factors=self.args.train_gt_factors,
use_rand_gt_factors=self.args.train_rand_gt_factors)
# Pick the Q-Values for the actions taken by each agent
rep_actions = actions.repeat(3, 1, 1, 1)
all_chosen_action_qvals = th.gather(all_mac_out[:, :-1], dim=3, index=rep_actions).squeeze(3) # Remove the last dim
mac_out, moW, moI = all_mac_out.chunk(3, dim=0)
chosen_action_qvals, caqW, caqI = all_chosen_action_qvals.chunk(3, dim=0)
caq_imagine = th.cat([caqW, caqI], dim=2)
if will_log and self.args.test_gt_factors:
gt_all_mac_out, gt_groups = self.mac.forward(batch, t=None, imagine=True, use_gt_factors=True)
# Pick the Q-Values for the actions taken by each agent
gt_all_chosen_action_qvals = th.gather(gt_all_mac_out[:, :-1], dim=3, index=rep_actions).squeeze(3) # Remove the last dim
gt_mac_out, gt_moW, gt_moI = gt_all_mac_out.chunk(3, dim=0)
gt_chosen_action_qvals, gt_caqW, gt_caqI = gt_all_chosen_action_qvals.chunk(3, dim=0)
gt_caq_imagine = th.cat([gt_caqW, gt_caqI], dim=2)
else:
mac_out = self.mac.forward(batch, t=None)
# Pick the Q-Values for the actions taken by each agent
chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
self.target_mac.init_hidden(batch.batch_size)
target_mac_out = self.target_mac.forward(batch, t=None)
avail_actions_targ = avail_actions
target_mac_out = target_mac_out[:, 1:]
# Mask out unavailable actions
target_mac_out[avail_actions_targ[:, 1:] == 0] = -9999999 # From OG deepmarl
# Max over target Q-Values
if self.args.double_q:
# Get actions that maximise live Q (for double q-learning)
mac_out_detach = mac_out.clone().detach()
mac_out_detach[avail_actions_targ == 0] = -9999999
cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
else:
target_max_qvals = target_mac_out.max(dim=3)[0]
# Mix
if self.mixer is not None:
if 'imagine' in self.args.agent:
mix_ins, targ_mix_ins = self._get_mixer_ins(batch)
chosen_action_qvals = self.mixer(chosen_action_qvals,
mix_ins)
# don't need last timestep
groups = [gr[:, :-1] for gr in groups]
if will_log and self.args.test_gt_factors:
caq_imagine, ingroup_prop = self.mixer(
caq_imagine, mix_ins,
imagine_groups=groups,
ret_ingroup_prop=True)
gt_groups = [gr[:, :-1] for gr in gt_groups]
gt_caq_imagine, gt_ingroup_prop = self.mixer(
gt_caq_imagine, mix_ins,
imagine_groups=gt_groups,
ret_ingroup_prop=True)
else:
caq_imagine = self.mixer(caq_imagine, mix_ins,
imagine_groups=groups)
else:
mix_ins, targ_mix_ins = self._get_mixer_ins(batch)
chosen_action_qvals = self.mixer(chosen_action_qvals, mix_ins)
target_max_qvals = self.target_mixer(target_max_qvals, targ_mix_ins)
# Calculate 1-step Q-Learning targets
targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
# Td-error
td_error = (chosen_action_qvals - targets.detach())
mask = mask.expand_as(td_error)
# 0-out the targets that came from padded data
masked_td_error = td_error * mask
# Normal L2 loss, take mean over actual data
loss = (masked_td_error ** 2).sum() / mask.sum()
if 'imagine' in self.args.agent:
im_prop = self.args.lmbda
im_td_error = (caq_imagine - targets.detach())
im_masked_td_error = im_td_error * mask
im_loss = (im_masked_td_error ** 2).sum() / mask.sum()
loss = (1 - im_prop) * loss + im_prop * im_loss
# Optimise
self.optimiser.zero_grad()
loss.backward()
grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
self.optimiser.step()
if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
self._update_targets()
self.last_target_update_episode = episode_num
if t_env - self.log_stats_t >= self.args.learner_log_interval:
self.logger.log_stat("loss", loss.item(), t_env)
if 'imagine' in self.args.agent:
self.logger.log_stat("im_loss", im_loss.item(), t_env)
if self.args.test_gt_factors:
self.logger.log_stat("ingroup_prop", ingroup_prop.item(), t_env)
self.logger.log_stat("gt_ingroup_prop", gt_ingroup_prop.item(), t_env)
self.logger.log_stat("grad_norm", grad_norm, t_env)
mask_elems = mask.sum().item()
self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
self.logger.log_stat("q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
self.logger.log_stat("target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
if batch.max_seq_length == 2:
# We are in a 1-step env. Calculate the max Q-Value for logging
max_agent_qvals = mac_out_detach[:,0].max(dim=2, keepdim=True)[0]
max_qtots = self.mixer(max_agent_qvals, batch["state"][:,0])
self.logger.log_stat("max_qtot", max_qtots.mean().item(), t_env)
self.log_stats_t = t_env
def _update_targets(self):
self.target_mac.load_state(self.mac)
if self.mixer is not None:
self.target_mixer.load_state_dict(self.mixer.state_dict())
self.logger.console_logger.info("Updated target network")
def cuda(self):
self.mac.cuda()
self.target_mac.cuda()
if self.mixer is not None:
self.mixer.cuda()
self.target_mixer.cuda()
def save_models(self, path):
self.mac.save_models(path)
if self.mixer is not None:
th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
def load_models(self, path, evaluate=False):
self.mac.load_models(path)
# Not quite right but I don't want to save target networks
self.target_mac.load_models(path)
if not evaluate:
if self.mixer is not None:
self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
| 47.795652 | 138 | 0.591467 | 1,455 | 10,993 | 4.220619 | 0.170447 | 0.031265 | 0.035988 | 0.024915 | 0.367041 | 0.310861 | 0.235629 | 0.212669 | 0.193454 | 0.13532 | 0 | 0.011919 | 0.297826 | 10,993 | 229 | 139 | 48.004367 | 0.783651 | 0.080415 | 0 | 0.129944 | 0 | 0 | 0.043439 | 0 | 0 | 0 | 0 | 0 | 0.011299 | 1 | 0.039548 | false | 0 | 0.039548 | 0 | 0.096045 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2acb17ec53db463ce7a137bd8aac418e998c50c9 | 5,733 | py | Python | discord_dictionary_bot/__main__.py | TychoTheTaco/Discord-Dictionary-Bot | c13e8955a39ce6ef49aecd7071a88ce9866d3a03 | [
"MIT"
] | 4 | 2021-03-29T23:35:04.000Z | 2021-12-12T20:35:49.000Z | discord_dictionary_bot/__main__.py | TychoTheTaco/Discord-Dictionary-Bot | c13e8955a39ce6ef49aecd7071a88ce9866d3a03 | [
"MIT"
] | 2 | 2020-12-08T23:56:00.000Z | 2021-05-15T03:37:33.000Z | discord_dictionary_bot/__main__.py | TychoTheTaco/Discord-Dictionary-Bot | c13e8955a39ce6ef49aecd7071a88ce9866d3a03 | [
"MIT"
] | 4 | 2021-03-29T04:29:13.000Z | 2021-12-12T20:37:56.000Z | import argparse
import os
import logging.config
import google.cloud.logging
from google.cloud.logging.handlers import CloudLoggingHandler
from .discord_bot_client import DiscordBotClient
from .dictionary_api import OwlBotDictionaryAPI, UnofficialGoogleAPI, MerriamWebsterCollegiateAPI, RapidWordsAPI, MerriamWebsterMedicalAPI
def logging_filter(record):
"""
Filter logs so that only records from this module are shown.
:param record:
:return:
"""
return 'discord_dictionary_bot' in record.name or 'discord_dictionary_bot' in record.pathname
def gcp_logging_filter(record):
return 'google.cloud.logging_v2.handlers.transports.background_thread' not in record.name
# Set up logging
logging.basicConfig(format='%(asctime)s [%(name)s] [%(levelname)s] %(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S')
logging.getLogger().handlers[0].addFilter(gcp_logging_filter)
logging.getLogger().handlers[0].addFilter(logging_filter)
def try_read_token(token_or_path: str) -> str:
"""
Try to read from the given file. If the file can be read, return the file contents. Otherwise, return the argument.
:param token_or_path:
:return:
"""
try:
with open(token_or_path) as file:
return file.read()
except IOError:
pass # Ignore and assume the argument is a token string not a file path
return token_or_path
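# For example, try_read_token('discord_token.txt') returns the file's contents
# if that file exists; otherwise the argument itself is used as the token.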
def main():
dictionary_api_options = {
'google': {
'class': UnofficialGoogleAPI
},
'owlbot': {
'class': OwlBotDictionaryAPI,
'key_arg_dest': 'owlbot_api_token',
'key_arg_name': '--owlbot-api-token',
'name': 'Owlbot'
},
'webster-collegiate': {
'class': MerriamWebsterCollegiateAPI,
'key_arg_dest': 'webster_collegiate_api_token',
'key_arg_name': '--webster-collegiate-api-token',
'name': 'Merriam Webster Collegiate'
},
'webster-medical': {
'class': MerriamWebsterMedicalAPI,
'key_arg_dest': 'webster_medical_api_token',
'key_arg_name': '--webster-medical-api-token',
'name': 'Merriam Webster Medical'
},
'rapid-words': {
'class': RapidWordsAPI,
'key_arg_dest': 'rapid_words_api_token',
'key_arg_name': '--rapid-words-api-token',
'name': 'RapidWords'
},
}
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--discord-token',
help='Token to use when running the bot. You can either use the raw token string or a path to a text file containing the token.',
dest='discord_bot_token',
default='discord_token.txt')
parser.add_argument('--ffmpeg-path',
help='Path to ffmpeg executable.',
dest='ffmpeg_path',
default='ffmpeg')
parser.add_argument('--google-credentials-path',
help='Path to Google application credentials JSON file.',
dest='google_credentials_path',
default='google_credentials.json')
parser.add_argument('--dictionary-api',
help='A list of dictionary API\'s to use for fetching definitions. These should be in order of priority and separated by comma\'s. Available API\'s are '
+ ', '.join(['\'' + x + '\'' for x in dictionary_api_options])
+ '. Some API\'s require tokens that must be provided with the appropriate arguments.',
dest='dictionary_api',
default=next(iter(dictionary_api_options)))
# Add API key arguments for dictionary API's
for k, v in dictionary_api_options.items():
if 'key_arg_dest' not in v or 'key_arg_name' not in v:
continue
parser.add_argument(v['key_arg_name'],
help=f'The token to use for the {v["name"]} dictionary API. You can use either the raw token string or a path to a text file containing the token.',
dest=v['key_arg_dest'],
default=f'{v["key_arg_dest"]}.txt')
args = parser.parse_args()
# Set Google API credentials
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = args.google_credentials_path
# Set up GCP logging
gcp_logging_client = google.cloud.logging.Client()
gcp_logging_handler = CloudLoggingHandler(gcp_logging_client, name='discord-dictionary-bot')
gcp_logging_handler.addFilter(gcp_logging_filter)
logging.getLogger().addHandler(gcp_logging_handler)
# Check which dictionary API we should use
dictionary_apis = []
for name in args.dictionary_api.split(','):
if name not in dictionary_api_options:
print(f'Invalid dictionary API: "{name}"')
return
api_info = dictionary_api_options[name]
# If this API requires a key, try to load it now
if 'key_arg_dest' in api_info:
if api_info['key_arg_dest'] not in args:
print(f'You must specify an API token with {api_info["key_arg_name"]} to use the {api_info["name"]} dictionary API!')
return
api_token = try_read_token(vars(args)[api_info["key_arg_dest"]])
dictionary_apis.append(api_info["class"](api_token))
else:
dictionary_apis.append(api_info["class"]())
# Start client
bot = DiscordBotClient(dictionary_apis, args.ffmpeg_path)
bot.run(try_read_token(args.discord_bot_token))
if __name__ == '__main__':
main()
| 39.8125 | 177 | 0.626548 | 688 | 5,733 | 5.014535 | 0.255814 | 0.029565 | 0.028986 | 0.016232 | 0.15971 | 0.089855 | 0.033043 | 0.033043 | 0.033043 | 0.033043 | 0 | 0.000717 | 0.270539 | 5,733 | 143 | 178 | 40.090909 | 0.824247 | 0.090354 | 0 | 0.019802 | 0 | 0.029703 | 0.293217 | 0.083527 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039604 | false | 0.009901 | 0.069307 | 0.009901 | 0.168317 | 0.019802 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2acd3a31447ec8fc383f4b3bbca9a7a9fbc1d239 | 4,526 | py | Python | twext/internet/decorate.py | troglodyne/ccs-twistedextensions | 1b43cb081ba68ae310140a9e853e041cd6362625 | [
"Apache-2.0"
] | 23 | 2016-08-14T07:20:27.000Z | 2021-11-08T09:47:45.000Z | twext/internet/decorate.py | DalavanCloud/ccs-twistedextensions | 2c4046df88873dcf33fba7840ed90e4238dcbec7 | [
"Apache-2.0"
] | 2 | 2016-12-15T17:51:49.000Z | 2019-05-12T15:59:03.000Z | twext/internet/decorate.py | DalavanCloud/ccs-twistedextensions | 2c4046df88873dcf33fba7840ed90e4238dcbec7 | [
"Apache-2.0"
] | 20 | 2016-08-17T06:51:00.000Z | 2022-03-26T11:55:56.000Z | ##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Decorators.
"""
__all__ = [
"memoizedKey",
]
from inspect import getargspec
from twisted.internet.defer import Deferred, succeed
class Memoizable(object):
"""
A class that stores itself in the memo dictionary.
"""
def memoMe(self, key, memo):
"""
Add this object to the memo dictionary in whatever fashion is appropriate.
@param key: key used for lookup
@type key: C{object} (typically C{str} or C{int})
@param memo: the dict to store to
@type memo: C{dict}
"""
raise NotImplementedError
def memoizedKey(keyArgument, memoAttribute, deferredResult=True):
"""
Decorator which memoizes the result of a method on that method's instance. If the instance is derived from
class Memoizable, then the memoMe method is used to store the result, otherwise it is stored directly in
the dict.
@param keyArgument: The name of the "key" argument.
@type keyArgument: C{str}
@param memoAttribute: The name of the attribute on the instance which
should be used for memoizing the result of this method; the attribute
itself must be a dictionary. Alternately, if the specified argument is
callable, it is a callable that takes the arguments passed to the
decorated method and returns the memo dictionaries.
@type memoAttribute: C{str} or C{callable}
@param deferredResult: Whether the result must be a deferred.
"""
def getarg(argname, argspec, args, kw):
"""
Get an argument from some arguments.
@param argname: The name of the argument to retrieve.
@param argspec: The result of L{inspect.getargspec}.
@param args: positional arguments passed to the function specified by
argspec.
@param kw: keyword arguments passed to the function specified by
argspec.
@return: The value of the argument named by 'argname'.
"""
argnames = argspec[0]
try:
argpos = argnames.index(argname)
except ValueError:
argpos = None
if argpos is not None:
if len(args) > argpos:
return args[argpos]
if argname in kw:
return kw[argname]
else:
raise TypeError("could not find key argument %r in %r/%r (%r)" % (
argname, args, kw, argpos
))
def decorate(thunk):
# cheater move to try to get the right argspec from inlineCallbacks.
# This could probably be more robust, but the 'cell_contents' thing
# probably can't (that's the only real reference to the underlying
# function).
if thunk.func_code.co_name == "unwindGenerator":
specTarget = thunk.func_closure[0].cell_contents
else:
specTarget = thunk
spec = getargspec(specTarget)
def outer(*a, **kw):
self = a[0]
if callable(memoAttribute):
memo = memoAttribute(*a, **kw)
else:
memo = getattr(self, memoAttribute)
key = getarg(keyArgument, spec, a, kw)
if key in memo:
memoed = memo[key]
if deferredResult:
return succeed(memoed)
else:
return memoed
result = thunk(*a, **kw)
if isinstance(result, Deferred):
def memoResult(finalResult):
if isinstance(finalResult, Memoizable):
finalResult.memoMe(key, memo)
elif finalResult is not None:
memo[key] = finalResult
return finalResult
result.addCallback(memoResult)
elif result is not None:
memo[key] = result
return result
return outer
return decorate
| 32.797101 | 110 | 0.610473 | 545 | 4,526 | 5.053211 | 0.370642 | 0.021786 | 0.011983 | 0.013072 | 0.045025 | 0.033406 | 0.033406 | 0.033406 | 0 | 0 | 0 | 0.004865 | 0.318825 | 4,526 | 137 | 111 | 33.036496 | 0.88842 | 0.480999 | 0 | 0.070175 | 0 | 0 | 0.033128 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.035088 | 0 | 0.298246 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2acdcfd92bc1d9c06ae492411fa9509e9f33c7da | 9,467 | py | Python | netconf/client.py | fortinet-solutions-cse/netconf-rest | 2147cc5d37eda848d9d87a8d74b26df9bc487384 | [
"Apache-2.0"
] | 7 | 2018-04-13T17:57:22.000Z | 2022-02-08T11:49:18.000Z | netconf/client.py | fortinet-solutions-cse/netconf-rest | 2147cc5d37eda848d9d87a8d74b26df9bc487384 | [
"Apache-2.0"
] | null | null | null | netconf/client.py | fortinet-solutions-cse/netconf-rest | 2147cc5d37eda848d9d87a8d74b26df9bc487384 | [
"Apache-2.0"
] | 2 | 2020-01-18T19:20:37.000Z | 2021-05-21T18:55:33.000Z | # -*- coding: utf-8 eval: (yapf-mode 1) -*-
#
# February 19 2015, Christian Hopps <chopps@gmail.com>
#
# Copyright (c) 2015, Deutsche Telekom AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, unicode_literals, print_function, nested_scopes
import logging
import io
import threading
import socket
import sshutil.conn
from lxml import etree
from monotonic import monotonic
from netconf import NSMAP
from netconf.base import NetconfSession
from netconf.error import RPCError, SessionError, ReplyTimeoutError
logger = logging.getLogger(__name__)
def _is_filter(select):
return select.lstrip().startswith("<")
def _get_selection(select):
if select is None:
return ""
elif _is_filter(select):
return "<filter>{}</filter>".format(select)
else:
return """<filter type="xpath" select="{}"/>""".format(select)
class Timeout(object):
def __init__(self, timeout):
self.start_time = monotonic()
if timeout is None:
self.end_time = None
else:
self.end_time = self.start_time + timeout
def is_expired(self):
if self.end_time is None:
return False
return self.end_time < monotonic()
def remaining(self):
if self.end_time is None:
return None
ctime = monotonic()
if self.end_time < ctime:
return 0
else:
return self.end_time - ctime
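# Minimal sketch of how Timeout is used below (illustrative):
#   t = Timeout(30)        # or Timeout(None) to wait forever
#   while not done:
#       if t.is_expired():
#           raise ReplyTimeoutError("timed out")
#       cv.wait(t.remaining())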
class NetconfClientSession(NetconfSession):
"""Netconf Protocol"""
def __init__(self, stream, debug=False):
super(NetconfClientSession, self).__init__(stream, debug, None)
self.message_id = 0
self.closing = False
self.rpc_out = {}
# Condition to handle rpc_out queue
self.cv = threading.Condition()
super(NetconfClientSession, self)._open_session(False)
def __str__(self):
return "NetconfClientSession(sid:{})".format(self.session_id)
def close(self):
if self.debug:
logger.debug("%s: Closing session.", str(self))
reply = None
try:
# So we need a lock here to check these members.
send = False
with self.cv:
if self.session_id is not None and self.is_active():
send = True
if send:
self.send_rpc_async("<close-session/>", noreply=True)
                # Don't wait for a reply; the session is closed!
except socket.error:
if self.debug:
logger.debug("Got socket error sending close-session request, ignoring")
super(NetconfClientSession, self).close()
if self.debug:
logger.debug("%s: Closed: %s", str(self), str(reply))
def send_rpc_async(self, rpc, noreply=False):
# Get the next message id
with self.cv:
assert self.session_id is not None
msg_id = self.message_id
self.message_id += 1
if self.debug:
logger.debug("%s: Sending RPC message-id: %s", str(self), str(msg_id))
def sendit():
self.send_message("""<rpc message-id="{}"
xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">{}</rpc>""".format(msg_id, rpc))
if noreply:
sendit()
return None
with self.cv:
sendit()
# Mark us as expecting a reply
self.rpc_out[msg_id] = None
return msg_id
def send_rpc(self, rpc, timeout=None):
msg_id = self.send_rpc_async(rpc)
return self.wait_reply(msg_id, timeout)
def is_reply_ready(self, msg_id):
"""Check whether reply is ready (or session closed)"""
with self.cv:
if not self.is_active():
raise SessionError("Session closed while checking for reply")
return self.rpc_out[msg_id] is not None
def wait_reply(self, msg_id, timeout=None):
assert msg_id in self.rpc_out
check_timeout = Timeout(timeout)
        with self.cv:
            # XXX need to make sure the channel doesn't close
            while self.rpc_out[msg_id] is None and self.is_active():
                remaining = check_timeout.remaining()
                self.cv.wait(remaining)
                if self.rpc_out[msg_id] is not None:
                    break
                if check_timeout.is_expired():
                    raise ReplyTimeoutError(
                        "Timeout ({}s) while waiting for RPC reply to msg-id: {}".format(
                            timeout, msg_id))
            if not self.is_active():
                raise SessionError("Session closed while waiting for reply")
            tree, reply, msg = self.rpc_out[msg_id]
            del self.rpc_out[msg_id]
error = reply.xpath("nc:rpc-error", namespaces=NSMAP)
if error:
raise RPCError(msg, tree, error[0])
# data = reply.xpath("nc:data", namespaces=self.nsmap)
# ok = reply.xpath("nc:ok", namespaces=self.nsmap)
return tree, reply, msg
def reader_exits(self):
if self.debug:
logger.debug("%s: Reader thread exited notifying all.", str(self))
with self.cv:
self.cv.notify_all()
def reader_handle_message(self, msg):
"""Handle a message, lock is already held"""
try:
tree = etree.parse(io.BytesIO(msg.encode('utf-8')))
if not tree:
raise SessionError(msg, "Invalid XML from server.")
except etree.XMLSyntaxError:
raise SessionError(msg, "Invalid XML from server.")
replies = tree.xpath("/nc:rpc-reply", namespaces=NSMAP)
if not replies:
raise SessionError(msg, "No rpc-reply found")
for reply in replies:
try:
msg_id = int(reply.get('message-id'))
except (TypeError, ValueError):
                # Cisco is returning errors without a message-id attribute, which is
                # non-RFC-conforming; it does this for any malformed XML, not simply
                # a missing message-id attribute.
                # error = reply.xpath("nc:rpc-error", namespaces=self.nsmap)
                # if error:
                #     raise RPCError(received, tree, error[0])
raise SessionError(msg, "No valid message-id attribute found")
# Queue the message
with self.cv:
try:
if msg_id not in self.rpc_out:
if self.debug:
logger.debug("Ignoring unwanted reply for message-id %s", str(msg_id))
return
elif self.rpc_out[msg_id] is not None:
logger.warning("Received multiple replies for message-id %s:"
" before: %s now: %s", str(msg_id), str(
self.rpc_out[msg_id]), str(msg))
if self.debug:
logger.debug("%s: Received rpc-reply message-id: %s", str(self),
str(msg_id))
self.rpc_out[msg_id] = tree, reply, msg
except Exception as error:
logger.debug("%s: Unexpected exception: %s", str(self), str(error))
raise
finally:
self.cv.notify_all()
def get_config_async(self, source, select):
rpc = "<get-config><source><{}/></source>".format(source)
rpc += _get_selection(select)
rpc += "</get-config>"
return self.send_rpc_async(rpc)
def get_config(self, source="running", select=None, timeout=None):
msg_id = self.get_config_async(source, select)
_, reply, _ = self.wait_reply(msg_id, timeout)
return reply.find("nc:config", namespaces=NSMAP)
def get_async(self, select):
rpc = "<get>" + _get_selection(select) + "</get>"
return self.send_rpc_async(rpc)
def get(self, select=None, timeout=None):
msg_id = self.get_async(select)
_, reply, _ = self.wait_reply(msg_id, timeout)
return reply.find("nc:data", namespaces=NSMAP)
class NetconfSSHSession(NetconfClientSession):
def __init__(self,
host,
port=830,
username=None,
password=None,
debug=False,
cache=None,
proxycmd=None):
if username is None:
import getpass
username = getpass.getuser()
stream = sshutil.conn.SSHClientSession(
host, port, "netconf", username, password, debug, cache=cache, proxycmd=proxycmd)
super(NetconfSSHSession, self).__init__(stream, debug)
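# A minimal usage sketch (host and credentials are illustrative):
#   session = NetconfSSHSession("192.0.2.1", username="admin", password="admin")
#   config = session.get_config(source="running")
#   session.close()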
__author__ = 'Christian Hopps'
__date__ = 'February 19 2015'
__version__ = '1.0'
__docformat__ = "restructuredtext en"
| 34.053957 | 102 | 0.584557 | 1,149 | 9,467 | 4.6745 | 0.241079 | 0.026997 | 0.022342 | 0.021784 | 0.214299 | 0.143921 | 0.115062 | 0.077081 | 0.019736 | 0.019736 | 0 | 0.005395 | 0.314778 | 9,467 | 277 | 103 | 34.176895 | 0.822568 | 0.146298 | 0 | 0.201058 | 0 | 0.005291 | 0.116146 | 0.014689 | 0 | 0 | 0 | 0 | 0.010582 | 1 | 0.10582 | false | 0.021164 | 0.063492 | 0.010582 | 0.291005 | 0.005291 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ad1c0b58a6c61835de8437f7dbfe232d50164ae | 871 | py | Python | setup.py | devlace/pyspark-databricks-package-base | 8203c61a7a276feb8035e698e0582d786b6e38cb | [
"MIT"
] | null | null | null | setup.py | devlace/pyspark-databricks-package-base | 8203c61a7a276feb8035e698e0582d786b6e38cb | [
"MIT"
] | null | null | null | setup.py | devlace/pyspark-databricks-package-base | 8203c61a7a276feb8035e698e0582d786b6e38cb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import os
from setuptools import setup, find_packages
version = os.environ['PACKAGE_VERSION']
requirements = ['pyspark',]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
name='mysparkpackage',
author="Lace Lofranco",
author_email='lace.lofranco@microsoft.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.7',
],
description="A nice python package!",
install_requires=requirements,
include_package_data=True,
packages=find_packages(include=['mysparkpackage', 'mysparkpackage.core']),
setup_requires=setup_requirements,
test_suite='mysparkpackage.tests',
tests_require=test_requirements,
version=version
)
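# Illustrative invocation (the version string is an example): the version is read
# from the PACKAGE_VERSION environment variable, so build with e.g.
#   PACKAGE_VERSION=0.1.0 python setup.py bdist_wheel
# Running without PACKAGE_VERSION set raises a KeyError.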
| 24.885714 | 78 | 0.690011 | 91 | 871 | 6.450549 | 0.626374 | 0.040886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005548 | 0.172216 | 871 | 34 | 79 | 25.617647 | 0.808599 | 0.068886 | 0 | 0 | 0 | 0 | 0.339552 | 0.033582 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ad260ba19aeceda597107e709c58b8afc9f4c4d | 1,050 | py | Python | wav2clip/__init__.py | LianQi-Kevin/wav2clip-changed | 7fce9689cc0751dd2c83551c2c8e755a3bd2a88a | [
"MIT"
] | 102 | 2021-10-30T04:34:50.000Z | 2022-03-30T09:42:42.000Z | wav2clip/__init__.py | LianQi-Kevin/wav2clip-changed | 7fce9689cc0751dd2c83551c2c8e755a3bd2a88a | [
"MIT"
] | 3 | 2021-11-05T10:16:51.000Z | 2022-02-01T15:25:02.000Z | wav2clip/__init__.py | LianQi-Kevin/wav2clip-changed | 7fce9689cc0751dd2c83551c2c8e755a3bd2a88a | [
"MIT"
] | 17 | 2021-10-30T12:37:17.000Z | 2022-03-12T03:09:31.000Z | import numpy as np
import torch
from .model.encoder import ResNetExtractor
MODEL_URL = "https://github.com/descriptinc/lyrebird-wav2clip/releases/download/v0.1.0-alpha/Wav2CLIP.pt"
def get_model(device="cpu", pretrained=True, frame_length=None, hop_length=None):
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
MODEL_URL, map_location=device, progress=True
)
model = ResNetExtractor(
checkpoint=checkpoint,
scenario="frozen",
transform=True,
frame_length=frame_length,
hop_length=hop_length,
)
else:
model = ResNetExtractor(
scenario="supervise", frame_length=frame_length, hop_length=hop_length
)
model.to(device)
return model
def embed_audio(audio, model):
if len(audio.shape) == 1:
audio = np.expand_dims(audio, axis=0)
return (
model(torch.from_numpy(audio).to(next(model.parameters()).device))
.detach()
.cpu()
.numpy()
)
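# A minimal usage sketch (the zero waveform below is a stand-in for real audio;
# any 1-D float numpy array works, and embed_audio adds the batch axis itself):
#   model = get_model(device="cpu")
#   waveform = np.zeros(16000, dtype=np.float32)
#   embeddings = embed_audio(waveform, model)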
| 26.923077 | 105 | 0.634286 | 122 | 1,050 | 5.286885 | 0.483607 | 0.085271 | 0.093023 | 0.068217 | 0.124031 | 0.124031 | 0.124031 | 0.124031 | 0 | 0 | 0 | 0.008997 | 0.259048 | 1,050 | 38 | 106 | 27.631579 | 0.820051 | 0 | 0 | 0.064516 | 0 | 0.032258 | 0.10381 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.096774 | 0 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ad400881130f32c28067bc3b7ab569d224ccd15 | 2,466 | py | Python | unit_tests/test_ganesha.py | ChrisMacNaughton/charm-ceph-nfs | d8d66cf57615c8ea6f7357742fece962e46927b7 | [
"Apache-2.0"
] | null | null | null | unit_tests/test_ganesha.py | ChrisMacNaughton/charm-ceph-nfs | d8d66cf57615c8ea6f7357742fece962e46927b7 | [
"Apache-2.0"
] | 1 | 2022-03-23T23:02:01.000Z | 2022-03-25T12:59:31.000Z | unit_tests/test_ganesha.py | ChrisMacNaughton/charm-ceph-nfs | d8d66cf57615c8ea6f7357742fece962e46927b7 | [
"Apache-2.0"
] | null | null | null | import unittest
import ganesha
EXAMPLE_EXPORT = """## This export is managed by the CephNFS charm ##
EXPORT {
# Each EXPORT must have a unique Export_Id.
Export_Id = 1000;
# The directory in the exported file system this export
# is rooted on.
Path = '/volumes/_nogroup/test_ganesha_share/e12a49ef-1b2b-40b3-ba6c-7e6695bcc950';
# FSAL, Ganesha's module component
FSAL {
# FSAL name
Name = "Ceph";
User_Id = "ganesha-test_ganesha_share";
Secret_Access_Key = "AQCT9+9h4cwJOxAAue2fFvvGTWziUiR9koCHEw==";
}
# Path of export in the NFSv4 pseudo filesystem
Pseudo = '/volumes/_nogroup/test_ganesha_share/e12a49ef-1b2b-40b3-ba6c-7e6695bcc950';
SecType = "sys";
CLIENT {
Access_Type = "rw";
Clients = 0.0.0.0;
}
# User id squashing, one of None, Root, All
Squash = "None";
}
"""
class ExportTest(unittest.TestCase):
def test_parser(self):
export = ganesha.Export.from_export(EXAMPLE_EXPORT)
self.assertEqual(export.export_id, 1000)
self.assertEqual(export.clients, [{'Access_Type': 'rw', 'Clients': '0.0.0.0'}])
self.assertEqual(export.name, 'test_ganesha_share')
def test_add_client(self):
export = ganesha.Export.from_export(EXAMPLE_EXPORT)
export.add_client('10.0.0.0/8')
self.assertEqual(
export.clients,
[{'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8'}])
# adding again shouldn't duplicate export
export.add_client('10.0.0.0/8')
self.assertEqual(
export.clients,
[{'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8'}])
export.add_client('192.168.0.0/16')
self.assertEqual(
export.clients,
[{
'Access_Type': 'rw', 'Clients': '0.0.0.0, 10.0.0.0/8, 192.168.0.0/16'
}])
def test_remove_client(self):
export = ganesha.Export.from_export(EXAMPLE_EXPORT)
export.add_client('10.0.0.0/8')
export.add_client('192.168.0.0/16')
self.assertEqual(
export.clients,
[{
'Access_Type': 'rw',
'Clients': '0.0.0.0, 10.0.0.0/8, 192.168.0.0/16'
}])
export.remove_client('0.0.0.0')
self.assertEqual(
export.clients,
[
{'Access_Type': 'rw', 'Clients': '10.0.0.0/8, 192.168.0.0/16'},
])
| 31.21519 | 89 | 0.577859 | 323 | 2,466 | 4.28483 | 0.263158 | 0.060694 | 0.047688 | 0.028902 | 0.580202 | 0.580202 | 0.580202 | 0.562139 | 0.478324 | 0.478324 | 0 | 0.098599 | 0.276156 | 2,466 | 78 | 90 | 31.615385 | 0.676751 | 0.015815 | 0 | 0.369231 | 0 | 0.030769 | 0.483299 | 0.092371 | 0 | 0 | 0 | 0 | 0.123077 | 1 | 0.046154 | false | 0 | 0.030769 | 0 | 0.092308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ad4c104b5f4d8ba75908f36b7e9691afc26ddd8 | 5,751 | py | Python | src/sentry/snuba/tasks.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/snuba/tasks.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/snuba/tasks.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import json
from sentry.api.event_search import get_filter
from sentry.models import Environment
from sentry.snuba.discover import resolve_discover_aliases
from sentry.snuba.models import (
QueryAggregations,
QueryDatasets,
QuerySubscription,
query_aggregation_to_snuba,
)
from sentry.tasks.base import instrumented_task
from sentry.utils import metrics
from sentry.utils.snuba import _snuba_pool, SnubaError
# TODO: If we want to support security events here we'll need a way to
# differentiate within the dataset. For now we can just assume all subscriptions
# created within this dataset are just for errors.
DATASET_CONDITIONS = {QueryDatasets.EVENTS: [["type", "=", "error"]]}
def apply_dataset_conditions(dataset, conditions):
if dataset in DATASET_CONDITIONS:
conditions = conditions + DATASET_CONDITIONS[dataset]
return conditions
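# For example (illustrative condition):
#   apply_dataset_conditions(QueryDatasets.EVENTS, [["level", "=", "error"]])
#   returns [["level", "=", "error"], ["type", "=", "error"]]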
@instrumented_task(
name="sentry.snuba.tasks.create_subscription_in_snuba",
queue="subscriptions",
default_retry_delay=5,
max_retries=5,
)
def create_subscription_in_snuba(query_subscription_id):
"""
Task to create a corresponding subscription in Snuba from a `QuerySubscription` in
Sentry. We store the snuba subscription id locally on success.
"""
try:
subscription = QuerySubscription.objects.get(id=query_subscription_id)
except QuerySubscription.DoesNotExist:
metrics.incr("snuba.subscriptions.create.subscription_does_not_exist")
return
if subscription.status != QuerySubscription.Status.CREATING.value:
metrics.incr("snuba.subscriptions.create.incorrect_status")
return
if subscription.subscription_id is not None:
metrics.incr("snuba.subscriptions.create.already_created_in_snuba")
return
subscription_id = _create_in_snuba(subscription)
subscription.update(
status=QuerySubscription.Status.ACTIVE.value, subscription_id=subscription_id
)
@instrumented_task(
name="sentry.snuba.tasks.update_subscription_in_snuba",
queue="subscriptions",
default_retry_delay=5,
max_retries=5,
)
def update_subscription_in_snuba(query_subscription_id):
"""
Task to update a corresponding subscription in Snuba from a `QuerySubscription` in
Sentry. Updating in Snuba means deleting the existing subscription, then creating a
new one.
"""
try:
subscription = QuerySubscription.objects.get(id=query_subscription_id)
except QuerySubscription.DoesNotExist:
metrics.incr("snuba.subscriptions.update.subscription_does_not_exist")
return
if subscription.status != QuerySubscription.Status.UPDATING.value:
metrics.incr("snuba.subscriptions.update.incorrect_status")
return
if subscription.subscription_id is not None:
_delete_from_snuba(QueryDatasets(subscription.dataset), subscription.subscription_id)
subscription_id = _create_in_snuba(subscription)
subscription.update(
status=QuerySubscription.Status.ACTIVE.value, subscription_id=subscription_id
)
@instrumented_task(
name="sentry.snuba.tasks.delete_subscription_from_snuba",
queue="subscriptions",
default_retry_delay=5,
max_retries=5,
)
def delete_subscription_from_snuba(query_subscription_id):
"""
Task to delete a corresponding subscription in Snuba from a `QuerySubscription` in
Sentry. Deletes the local subscription once we've successfully removed from Snuba.
"""
try:
subscription = QuerySubscription.objects.get(id=query_subscription_id)
except QuerySubscription.DoesNotExist:
metrics.incr("snuba.subscriptions.delete.subscription_does_not_exist")
return
if subscription.status != QuerySubscription.Status.DELETING.value:
metrics.incr("snuba.subscriptions.delete.incorrect_status")
return
if subscription.subscription_id is not None:
_delete_from_snuba(QueryDatasets(subscription.dataset), subscription.subscription_id)
subscription.delete()
def _create_in_snuba(subscription):
conditions = resolve_discover_aliases(get_filter(subscription.query))[0].conditions
try:
environment = subscription.environments.all()[:1].get()
except Environment.DoesNotExist:
environment = None
if environment:
conditions.append(["environment", "=", environment.name])
conditions = apply_dataset_conditions(QueryDatasets(subscription.dataset), conditions)
response = _snuba_pool.urlopen(
"POST",
"/%s/subscriptions" % (subscription.dataset,),
body=json.dumps(
{
"project_id": subscription.project_id,
"dataset": subscription.dataset,
# We only care about conditions here. Filter keys only matter for
# filtering to project and groups. Projects are handled with an
# explicit param, and groups can't be queried here.
"conditions": conditions,
"aggregations": [
query_aggregation_to_snuba[QueryAggregations(subscription.aggregation)]
],
"time_window": subscription.time_window,
"resolution": subscription.resolution,
}
),
)
if response.status != 202:
raise SnubaError("HTTP %s response from Snuba!" % response.status)
return json.loads(response.data)["subscription_id"]
def _delete_from_snuba(dataset, subscription_id):
response = _snuba_pool.urlopen(
"DELETE", "/%s/subscriptions/%s" % (dataset.value, subscription_id)
)
if response.status != 202:
raise SnubaError("HTTP %s response from Snuba!" % response.status)
| 36.398734 | 93 | 0.722309 | 630 | 5,751 | 6.395238 | 0.236508 | 0.072971 | 0.033011 | 0.050385 | 0.521966 | 0.479027 | 0.462646 | 0.462646 | 0.440804 | 0.440804 | 0 | 0.003028 | 0.195966 | 5,751 | 157 | 94 | 36.630573 | 0.868296 | 0.149713 | 0 | 0.403509 | 0 | 0 | 0.150114 | 0.10056 | 0 | 0 | 0 | 0.006369 | 0 | 1 | 0.052632 | false | 0 | 0.078947 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ad78a3a11071c4e51390ac33b8d72e73907054a | 35,729 | py | Python | haystack/document_stores/opensearch.py | deepset-ai/Haystack | 4a63707f1a177123c13929eb316d3ecaa7fd6c5f | [
"Apache-2.0"
] | null | null | null | haystack/document_stores/opensearch.py | deepset-ai/Haystack | 4a63707f1a177123c13929eb316d3ecaa7fd6c5f | [
"Apache-2.0"
] | null | null | null | haystack/document_stores/opensearch.py | deepset-ai/Haystack | 4a63707f1a177123c13929eb316d3ecaa7fd6c5f | [
"Apache-2.0"
] | 1 | 2022-02-17T05:08:53.000Z | 2022-02-17T05:08:53.000Z | from typing import List, Optional, Union, Dict, Any
import logging
from copy import deepcopy
import numpy as np
from tqdm.auto import tqdm
try:
from elasticsearch.helpers import bulk
from elasticsearch.exceptions import RequestError
except (ImportError, ModuleNotFoundError) as ie:
from haystack.utils.import_utils import _optional_component_not_installed
_optional_component_not_installed(__name__, "elasticsearch", ie)
from haystack.schema import Document
from haystack.document_stores.base import get_batches_from_generator
from haystack.document_stores.filter_utils import LogicalFilterClause
from .elasticsearch import ElasticsearchDocumentStore
logger = logging.getLogger(__name__)
class OpenSearchDocumentStore(ElasticsearchDocumentStore):
def __init__(
self,
scheme: str = "https", # Mind this different default param
username: str = "admin", # Mind this different default param
password: str = "admin", # Mind this different default param
host: Union[str, List[str]] = "localhost",
port: Union[int, List[int]] = 9200,
api_key_id: Optional[str] = None,
api_key: Optional[str] = None,
aws4auth=None,
index: str = "document",
label_index: str = "label",
search_fields: Union[str, list] = "content",
content_field: str = "content",
name_field: str = "name",
embedding_field: str = "embedding",
embedding_dim: int = 768,
custom_mapping: Optional[dict] = None,
excluded_meta_data: Optional[list] = None,
analyzer: str = "standard",
ca_certs: Optional[str] = None,
verify_certs: bool = False, # Mind this different default param
recreate_index: bool = False,
create_index: bool = True,
refresh_type: str = "wait_for",
similarity: str = "dot_product",
timeout: int = 30,
return_embedding: bool = False,
duplicate_documents: str = "overwrite",
index_type: str = "flat",
scroll: str = "1d",
skip_missing_embeddings: bool = True,
synonyms: Optional[List] = None,
synonym_type: str = "synonym",
use_system_proxy: bool = False,
):
"""
Document Store using OpenSearch (https://opensearch.org/). It is compatible with the AWS Elasticsearch Service.
In addition to native Elasticsearch query & filtering, it provides efficient vector similarity search using
the KNN plugin that can scale to a large number of documents.
:param host: url(s) of elasticsearch nodes
:param port: port(s) of elasticsearch nodes
:param username: username (standard authentication via http_auth)
:param password: password (standard authentication via http_auth)
        :param api_key_id: ID of the API key (alternative authentication mode to the above http_auth)
        :param api_key: Secret value of the API key (alternative authentication mode to the above http_auth)
:param aws4auth: Authentication for usage with aws elasticsearch (can be generated with the requests-aws4auth package)
:param index: Name of index in elasticsearch to use for storing the documents that we want to search. If not existing yet, we will create one.
:param label_index: Name of index in elasticsearch to use for storing labels. If not existing yet, we will create one.
:param search_fields: Name of fields used by BM25Retriever to find matches in the docs to our incoming query (using elastic's multi_match query), e.g. ["title", "full_text"]
:param content_field: Name of field that might contain the answer and will therefore be passed to the Reader Model (e.g. "full_text").
If no Reader is used (e.g. in FAQ-Style QA) the plain content of this field will just be returned.
        :param name_field: Name of field that contains the title of the doc
:param embedding_field: Name of field containing an embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)
Note, that in OpenSearch the similarity type for efficient approximate vector similarity calculations is tied to the embedding field's data type which cannot be changed after creation.
:param embedding_dim: Dimensionality of embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)
:param custom_mapping: If you want to use your own custom mapping for creating a new index in Elasticsearch, you can supply it here as a dictionary.
:param analyzer: Specify the default analyzer from one of the built-ins when creating a new Elasticsearch Index.
Elasticsearch also has built-in analyzers for different languages (e.g. impacting tokenization). More info at:
https://www.elastic.co/guide/en/elasticsearch/reference/7.9/analysis-analyzers.html
:param excluded_meta_data: Name of fields in Elasticsearch that should not be returned (e.g. [field_one, field_two]).
Helpful if you have fields with long, irrelevant content that you don't want to display in results (e.g. embedding vectors).
:param scheme: 'https' or 'http', protocol used to connect to your elasticsearch instance
:param ca_certs: Root certificates for SSL: it is a path to certificate authority (CA) certs on disk. You can use certifi package with certifi.where() to find where the CA certs file is located in your machine.
:param verify_certs: Whether to be strict about ca certificates
        :param create_index: Whether to try creating a new index (if an index with that name already exists, we will just continue in any case)
:param refresh_type: Type of ES refresh used to control when changes made by a request (e.g. bulk) are made visible to search.
If set to 'wait_for', continue only after changes are visible (slow, but safe).
If set to 'false', continue directly (fast, but sometimes unintuitive behaviour when docs are not immediately available after ingestion).
More info at https://www.elastic.co/guide/en/elasticsearch/reference/6.8/docs-refresh.html
:param similarity: The similarity function used to compare document vectors. 'dot_product' is the default since it is
more performant with DPR embeddings. 'cosine' is recommended if you are using a Sentence BERT model.
Note, that the use of efficient approximate vector calculations in OpenSearch is tied to embedding_field's data type which cannot be changed after creation.
You won't be able to use approximate vector calculations on an embedding_field which was created with a different similarity value.
In such cases a fallback to exact but slow vector calculations will happen and a warning will be displayed.
:param timeout: Number of seconds after which an ElasticSearch request times out.
:param return_embedding: To return document embedding
:param duplicate_documents: Handle duplicates document based on parameter options.
Parameter options : ( 'skip','overwrite','fail')
skip: Ignore the duplicates documents
overwrite: Update any existing documents with the same ID when adding documents.
fail: an error is raised if the document ID of the document being added already
exists.
:param index_type: The type of index to be created. Choose from 'flat' and 'hnsw'.
As OpenSearch currently does not support all similarity functions (e.g. dot_product) in exact vector similarity calculations,
we don't make use of exact vector similarity when index_type='flat'. Instead we use the same approximate vector similarity calculations like in 'hnsw', but further optimized for accuracy.
Exact vector similarity is only used as fallback when there's a mismatch between certain requested and indexed similarity types.
In these cases however, a warning will be displayed. See similarity param for more information.
:param scroll: Determines how long the current index is fixed, e.g. during updating all documents with embeddings.
Defaults to "1d" and should not be larger than this. Can also be in minutes "5m" or hours "15h"
For details, see https://www.elastic.co/guide/en/elasticsearch/reference/current/scroll-api.html
:param skip_missing_embeddings: Parameter to control queries based on vector similarity when indexed documents miss embeddings.
Parameter options: (True, False)
False: Raises exception if one or more documents do not have embeddings at query time
True: Query will ignore all documents without embeddings (recommended if you concurrently index and query)
:param synonyms: List of synonyms can be passed while elasticsearch initialization.
For example: [ "foo, bar => baz",
"foozball , foosball" ]
More info at https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-tokenfilter.html
:param synonym_type: Synonym filter type can be passed.
Synonym or Synonym_graph to handle synonyms, including multi-word synonyms correctly during the analysis process.
More info at https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-graph-tokenfilter.html
"""
self.embeddings_field_supports_similarity = False
self.similarity_to_space_type = {"cosine": "cosinesimil", "dot_product": "innerproduct", "l2": "l2"}
self.space_type_to_similarity = {v: k for k, v in self.similarity_to_space_type.items()}
super().__init__(
scheme=scheme,
username=username,
password=password,
host=host,
port=port,
api_key_id=api_key_id,
api_key=api_key,
aws4auth=aws4auth,
index=index,
label_index=label_index,
search_fields=search_fields,
content_field=content_field,
name_field=name_field,
embedding_field=embedding_field,
embedding_dim=embedding_dim,
custom_mapping=custom_mapping,
excluded_meta_data=excluded_meta_data,
analyzer=analyzer,
ca_certs=ca_certs,
verify_certs=verify_certs,
recreate_index=recreate_index,
create_index=create_index,
refresh_type=refresh_type,
similarity=similarity,
timeout=timeout,
return_embedding=return_embedding,
duplicate_documents=duplicate_documents,
index_type=index_type,
scroll=scroll,
skip_missing_embeddings=skip_missing_embeddings,
synonyms=synonyms,
synonym_type=synonym_type,
use_system_proxy=use_system_proxy,
)
def query_by_embedding(
self,
query_emb: np.ndarray,
filters: Optional[Dict[str, Union[Dict, List, str, int, float, bool]]] = None,
top_k: int = 10,
index: Optional[str] = None,
return_embedding: Optional[bool] = None,
headers: Optional[Dict[str, str]] = None,
scale_score: bool = True,
) -> List[Document]:
"""
Find the document that is most similar to the provided `query_emb` by using a vector similarity metric.
:param query_emb: Embedding of the query (e.g. gathered from DPR)
:param filters: Optional filters to narrow down the search space to documents whose metadata fulfill certain
conditions.
Filters are defined as nested dictionaries. The keys of the dictionaries can be a logical
operator (`"$and"`, `"$or"`, `"$not"`), a comparison operator (`"$eq"`, `"$in"`, `"$gt"`,
`"$gte"`, `"$lt"`, `"$lte"`) or a metadata field name.
Logical operator keys take a dictionary of metadata field names and/or logical operators as
value. Metadata field names take a dictionary of comparison operators as value. Comparison
operator keys take a single value or (in case of `"$in"`) a list of values as value.
If no logical operator is provided, `"$and"` is used as default operation. If no comparison
operator is provided, `"$eq"` (or `"$in"` if the comparison value is a list) is used as default
operation.
__Example__:
```python
filters = {
"$and": {
"type": {"$eq": "article"},
"date": {"$gte": "2015-01-01", "$lt": "2021-01-01"},
"rating": {"$gte": 3},
"$or": {
"genre": {"$in": ["economy", "politics"]},
"publisher": {"$eq": "nytimes"}
}
}
}
# or simpler using default operators
filters = {
"type": "article",
"date": {"$gte": "2015-01-01", "$lt": "2021-01-01"},
"rating": {"$gte": 3},
"$or": {
"genre": ["economy", "politics"],
"publisher": "nytimes"
}
}
```
To use the same logical operator multiple times on the same level, logical operators take
optionally a list of dictionaries as value.
__Example__:
```python
filters = {
"$or": [
{
"$and": {
"Type": "News Paper",
"Date": {
"$lt": "2019-01-01"
}
}
},
{
"$and": {
"Type": "Blog Post",
"Date": {
"$gte": "2019-01-01"
}
}
}
]
}
```
:param top_k: How many documents to return
:param index: Index name for storing the docs and metadata
:param return_embedding: To return document embedding
:param headers: Custom HTTP headers to pass to elasticsearch client (e.g. {'Authorization': 'Basic YWRtaW46cm9vdA=='})
Check out https://www.elastic.co/guide/en/elasticsearch/reference/current/http-clients.html for more information.
:param scale_score: Whether to scale the similarity score to the unit interval (range of [0,1]).
If true (default) similarity scores (e.g. cosine or dot_product) which naturally have a different value range will be scaled to a range of [0,1], where 1 means extremely relevant.
Otherwise raw similarity scores (e.g. cosine or dot_product) will be used.
:return:
"""
if index is None:
index = self.index
if return_embedding is None:
return_embedding = self.return_embedding
if not self.embedding_field:
raise RuntimeError("Please specify arg `embedding_field` in ElasticsearchDocumentStore()")
# +1 in similarity to avoid negative numbers (for cosine sim)
body: Dict[str, Any] = {"size": top_k, "query": self._get_vector_similarity_query(query_emb, top_k)}
if filters:
body["query"]["bool"]["filter"] = LogicalFilterClause.parse(filters).convert_to_elasticsearch()
excluded_meta_data: Optional[list] = None
if self.excluded_meta_data:
excluded_meta_data = deepcopy(self.excluded_meta_data)
if return_embedding is True and self.embedding_field in excluded_meta_data:
excluded_meta_data.remove(self.embedding_field)
elif return_embedding is False and self.embedding_field not in excluded_meta_data:
excluded_meta_data.append(self.embedding_field)
elif return_embedding is False:
excluded_meta_data = [self.embedding_field]
if excluded_meta_data:
body["_source"] = {"excludes": excluded_meta_data}
logger.debug(f"Retriever query: {body}")
result = self.client.search(index=index, body=body, request_timeout=300, headers=headers)["hits"]["hits"]
documents = [
self._convert_es_hit_to_document(
hit, adapt_score_for_embedding=True, return_embedding=return_embedding, scale_score=scale_score
)
for hit in result
]
return documents
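    # A minimal usage sketch (illustrative; in practice query_emb comes from the
    # same embedding model that produced the indexed document embeddings):
    #   query_emb = np.random.rand(768).astype(np.float32)
    #   docs = document_store.query_by_embedding(query_emb, top_k=5)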
def _create_document_index(self, index_name: str, headers: Optional[Dict[str, str]] = None):
"""
Create a new index for storing documents.
"""
# Check if index_name refers to an alias
if self.client.indices.exists_alias(name=index_name):
logger.debug(f"Index name {index_name} is an alias.")
# check if the existing index has the embedding field; if not create it
if self.client.indices.exists(index=index_name, headers=headers):
indices = self.client.indices.get(index_name, headers=headers)
# If the index name is an alias that groups multiple existing indices, each of them must have an embedding_field.
for index_id, index_info in indices.items():
mappings = index_info["mappings"]
index_settings = index_info["settings"]["index"]
if self.search_fields:
for search_field in self.search_fields:
if (
search_field in mappings["properties"]
and mappings["properties"][search_field]["type"] != "text"
):
raise Exception(
f"The search_field '{search_field}' of index '{index_id}' with type '{mappings['properties'][search_field]['type']}' "
f"does not have the right type 'text' to be queried in fulltext search. Please use only 'text' type properties as search_fields or use another index. "
f"This error might occur if you are trying to use haystack 1.0 and above with an existing elasticsearch index created with a previous version of haystack. "
f'In this case deleting the index with `delete_index(index="{index_id}")` will fix your environment. '
f"Note, that all data stored in the index will be lost!"
)
# embedding field will be created
if self.embedding_field not in mappings["properties"]:
mappings["properties"][self.embedding_field] = self._get_embedding_field_mapping(
similarity=self.similarity
)
self.client.indices.put_mapping(index=index_id, body=mappings, headers=headers)
self.embeddings_field_supports_similarity = True
else:
# bad embedding field
if mappings["properties"][self.embedding_field]["type"] != "knn_vector":
raise Exception(
f"The '{index_id}' index in OpenSearch already has a field called '{self.embedding_field}'"
f" with the type '{mappings['properties'][self.embedding_field]['type']}'. Please update the "
f"document_store to use a different name for the embedding_field parameter."
)
                # embedding field with global space_type setting
                if "method" not in mappings["properties"][self.embedding_field]:
                    embedding_field_space_type = index_settings["knn.space_type"]
                # embedding field with local space_type setting
                else:
                    embedding_field_space_type = mappings["properties"][self.embedding_field]["method"][
                        "space_type"
                    ]
embedding_field_similarity = self.space_type_to_similarity[embedding_field_space_type]
if embedding_field_similarity == self.similarity:
self.embeddings_field_supports_similarity = True
else:
logger.warning(
f"Embedding field '{self.embedding_field}' is optimized for similarity '{embedding_field_similarity}'. "
f"Falling back to slow exact vector calculation. "
f"Consider cloning the embedding field optimized for '{embedding_field_similarity}' by calling clone_embedding_field(similarity='{embedding_field_similarity}', ...) "
f"or creating a new index optimized for '{self.similarity}' by setting `similarity='{self.similarity}'` the first time you instantiate OpenSearchDocumentStore for the new index, "
f"e.g. `OpenSearchDocumentStore(index='my_new_{self.similarity}_index', similarity='{self.similarity}')`."
)
# Adjust global ef_search setting. If not set, default is 512.
ef_search = index_settings.get("knn.algo_param", {"ef_search": 512}).get("ef_search", 512)
if self.index_type == "hnsw" and ef_search != 20:
body = {"knn.algo_param.ef_search": 20}
self.client.indices.put_settings(index=index_id, body=body, headers=headers)
elif self.index_type == "flat" and ef_search != 512:
body = {"knn.algo_param.ef_search": 512}
self.client.indices.put_settings(index=index_id, body=body, headers=headers)
return
if self.custom_mapping:
index_definition = self.custom_mapping
else:
index_definition = {
"mappings": {
"properties": {self.name_field: {"type": "keyword"}, self.content_field: {"type": "text"}},
"dynamic_templates": [
{"strings": {"path_match": "*", "match_mapping_type": "string", "mapping": {"type": "keyword"}}}
],
},
"settings": {"analysis": {"analyzer": {"default": {"type": self.analyzer}}}},
}
if self.synonyms:
for field in self.search_fields:
index_definition["mappings"]["properties"].update({field: {"type": "text", "analyzer": "synonym"}})
index_definition["mappings"]["properties"][self.content_field] = {"type": "text", "analyzer": "synonym"}
index_definition["settings"]["analysis"]["analyzer"]["synonym"] = {
"tokenizer": "whitespace",
"filter": ["lowercase", "synonym"],
}
index_definition["settings"]["analysis"]["filter"] = {
"synonym": {"type": self.synonym_type, "synonyms": self.synonyms}
}
else:
for field in self.search_fields:
index_definition["mappings"]["properties"].update({field: {"type": "text"}})
if self.embedding_field:
index_definition["settings"]["index"] = {"knn": True}
if self.index_type == "hnsw":
index_definition["settings"]["index"]["knn.algo_param.ef_search"] = 20
index_definition["mappings"]["properties"][self.embedding_field] = self._get_embedding_field_mapping(
similarity=self.similarity
)
try:
self.client.indices.create(index=index_name, body=index_definition, headers=headers)
except RequestError as e:
# With multiple workers we need to avoid race conditions, where:
# - there's no index in the beginning
# - both want to create one
# - one fails as the other one already created it
if not self.client.indices.exists(index=index_name, headers=headers):
raise e
def _get_embedding_field_mapping(self, similarity: str):
space_type = self.similarity_to_space_type[similarity]
method: dict = {"space_type": space_type, "name": "hnsw", "engine": "nmslib"}
if self.index_type == "flat":
# use default parameters from https://opensearch.org/docs/1.2/search-plugins/knn/knn-index/
# we need to set them explicitly as aws managed instances starting from version 1.2 do not support empty parameters
method["parameters"] = {"ef_construction": 512, "m": 16}
elif self.index_type == "hnsw":
method["parameters"] = {"ef_construction": 80, "m": 64}
else:
logger.error("Please set index_type to either 'flat' or 'hnsw'")
embeddings_field_mapping = {"type": "knn_vector", "dimension": self.embedding_dim, "method": method}
return embeddings_field_mapping
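    # With the default embedding_dim of 768, similarity="dot_product" and
    # index_type="flat", this returns (illustrative):
    #   {"type": "knn_vector", "dimension": 768,
    #    "method": {"space_type": "innerproduct", "name": "hnsw", "engine": "nmslib",
    #               "parameters": {"ef_construction": 512, "m": 16}}}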
def _create_label_index(self, index_name: str, headers: Optional[Dict[str, str]] = None):
if self.client.indices.exists(index=index_name, headers=headers):
return
mapping = {
"mappings": {
"properties": {
"query": {"type": "text"},
"answer": {
"type": "nested"
}, # In elasticsearch we use type:flattened, but this is not supported in opensearch
"document": {"type": "nested"},
"is_correct_answer": {"type": "boolean"},
"is_correct_document": {"type": "boolean"},
"origin": {"type": "keyword"}, # e.g. user-feedback or gold-label
"document_id": {"type": "keyword"},
"no_answer": {"type": "boolean"},
"pipeline_id": {"type": "keyword"},
"created_at": {"type": "date", "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"},
"updated_at": {"type": "date", "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"}
# TODO add pipeline_hash and pipeline_name once we migrated the REST API to pipelines
}
}
}
try:
self.client.indices.create(index=index_name, body=mapping, headers=headers)
except RequestError as e:
# With multiple workers we need to avoid race conditions, where:
# - there's no index in the beginning
# - both want to create one
# - one fails as the other one already created it
if not self.client.indices.exists(index=index_name, headers=headers):
raise e
def _get_vector_similarity_query(self, query_emb: np.ndarray, top_k: int):
"""
Generate Elasticsearch query for vector similarity.
"""
if self.embeddings_field_supports_similarity:
query: dict = {
"bool": {"must": [{"knn": {self.embedding_field: {"vector": query_emb.tolist(), "k": top_k}}}]}
}
else:
# if we do not have a proper similarity field we have to fall back to exact but slow vector similarity calculation
query = {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": "knn_score",
"lang": "knn",
"params": {
"field": self.embedding_field,
"query_value": query_emb.tolist(),
"space_type": self.similarity_to_space_type[self.similarity],
},
},
}
}
return query
def _get_raw_similarity_score(self, score):
# adjust scores according to https://opensearch.org/docs/latest/search-plugins/knn/approximate-knn
# and https://opensearch.org/docs/latest/search-plugins/knn/knn-score-script/
if self.similarity == "dot_product":
if score > 1:
score = score - 1
else:
score = -(1 / score - 1)
elif self.similarity == "l2":
score = 1 / score - 1
elif self.similarity == "cosine":
if self.embeddings_field_supports_similarity:
score = -(1 / score - 2)
else:
score = score - 1
return score
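    # Illustrative round-trips, following the OpenSearch score conventions linked
    # above: for dot_product an engine score of 1.5 maps to a raw score of 0.5;
    # for l2 an engine score of 0.5 maps to a raw distance of 1.0.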
def clone_embedding_field(
self,
new_embedding_field: str,
similarity: str,
batch_size: int = 10_000,
headers: Optional[Dict[str, str]] = None,
):
mapping = self.client.indices.get(self.index, headers=headers)[self.index]["mappings"]
if new_embedding_field in mapping["properties"]:
raise Exception(
f"{new_embedding_field} already exists with mapping {mapping['properties'][new_embedding_field]}"
)
mapping["properties"][new_embedding_field] = self._get_embedding_field_mapping(similarity=similarity)
self.client.indices.put_mapping(index=self.index, body=mapping, headers=headers)
document_count = self.get_document_count(headers=headers)
result = self._get_all_documents_in_index(index=self.index, batch_size=batch_size, headers=headers)
logging.getLogger("elasticsearch").setLevel(logging.CRITICAL)
with tqdm(total=document_count, position=0, unit=" Docs", desc="Cloning embeddings") as progress_bar:
for result_batch in get_batches_from_generator(result, batch_size):
document_batch = [self._convert_es_hit_to_document(hit, return_embedding=True) for hit in result_batch]
doc_updates = []
for doc in document_batch:
if doc.embedding is not None:
update = {
"_op_type": "update",
"_index": self.index,
"_id": doc.id,
"doc": {new_embedding_field: doc.embedding.tolist()},
}
doc_updates.append(update)
bulk(self.client, doc_updates, request_timeout=300, refresh=self.refresh_type, headers=headers)
progress_bar.update(batch_size)
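    # A minimal usage sketch (field name is illustrative; assumes an already
    # populated index whose documents carry embeddings):
    #   store = OpenSearchDocumentStore(similarity="dot_product")
    #   store.clone_embedding_field("embedding_cosine", similarity="cosine")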
class OpenDistroElasticsearchDocumentStore(OpenSearchDocumentStore):
"""
A DocumentStore which has an Open Distro for Elasticsearch service behind it.
"""
def __init__(
self,
scheme: str = "https",
username: str = "admin",
password: str = "admin",
host: Union[str, List[str]] = "localhost",
port: Union[int, List[int]] = 9200,
api_key_id: Optional[str] = None,
api_key: Optional[str] = None,
aws4auth=None,
index: str = "document",
label_index: str = "label",
search_fields: Union[str, list] = "content",
content_field: str = "content",
name_field: str = "name",
embedding_field: str = "embedding",
embedding_dim: int = 768,
custom_mapping: Optional[dict] = None,
excluded_meta_data: Optional[list] = None,
analyzer: str = "standard",
ca_certs: Optional[str] = None,
verify_certs: bool = False,
recreate_index: bool = False,
create_index: bool = True,
refresh_type: str = "wait_for",
similarity: str = "cosine", # Mind this different default param
timeout: int = 30,
return_embedding: bool = False,
duplicate_documents: str = "overwrite",
index_type: str = "flat",
scroll: str = "1d",
skip_missing_embeddings: bool = True,
synonyms: Optional[List] = None,
synonym_type: str = "synonym",
use_system_proxy: bool = False,
):
logger.warning(
"Open Distro for Elasticsearch has been replaced by OpenSearch! "
"See https://opensearch.org/faq/ for details. "
"We recommend using the OpenSearchDocumentStore instead."
)
super().__init__(
scheme=scheme,
username=username,
password=password,
host=host,
port=port,
api_key_id=api_key_id,
api_key=api_key,
aws4auth=aws4auth,
index=index,
label_index=label_index,
search_fields=search_fields,
content_field=content_field,
name_field=name_field,
embedding_field=embedding_field,
embedding_dim=embedding_dim,
custom_mapping=custom_mapping,
excluded_meta_data=excluded_meta_data,
analyzer=analyzer,
ca_certs=ca_certs,
verify_certs=verify_certs,
recreate_index=recreate_index,
create_index=create_index,
refresh_type=refresh_type,
similarity=similarity,
timeout=timeout,
return_embedding=return_embedding,
duplicate_documents=duplicate_documents,
index_type=index_type,
scroll=scroll,
skip_missing_embeddings=skip_missing_embeddings,
synonyms=synonyms,
synonym_type=synonym_type,
use_system_proxy=use_system_proxy,
)
| 55.826563 | 218 | 0.576031 | 3,881 | 35,729 | 5.149704 | 0.168256 | 0.04343 | 0.017112 | 0.010858 | 0.39973 | 0.345792 | 0.318373 | 0.290103 | 0.270089 | 0.25678 | 0 | 0.00674 | 0.339724 | 35,729 | 639 | 219 | 55.913928 | 0.840448 | 0.372806 | 0 | 0.461165 | 0 | 0.012136 | 0.16761 | 0.033965 | 0 | 0 | 0 | 0.001565 | 0 | 1 | 0.021845 | false | 0.009709 | 0.031553 | 0 | 0.072816 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2adb1c088edb27e4836b840c293ec395556a0d8a | 11,591 | py | Python | server/src/weblab/admin/web/app.py | zstars/weblabdeusto | 09bd9d93d483671bca67ee5c70a9c412eb5d352f | [
"BSD-2-Clause"
] | null | null | null | server/src/weblab/admin/web/app.py | zstars/weblabdeusto | 09bd9d93d483671bca67ee5c70a9c412eb5d352f | [
"BSD-2-Clause"
] | null | null | null | server/src/weblab/admin/web/app.py | zstars/weblabdeusto | 09bd9d93d483671bca67ee5c70a9c412eb5d352f | [
"BSD-2-Clause"
] | null | null | null | import os
import sys
import urlparse
import traceback
from sqlalchemy.orm import scoped_session, sessionmaker
from flask import Flask, request, redirect, url_for, escape
from flask.ext.admin import Admin, BaseView, expose
if __name__ == '__main__':
sys.path.insert(0, '.')
from weblab.core.exc import SessionNotFoundError
import weblab.core.server
import weblab.configuration_doc as configuration_doc
from weblab.data import ValidDatabaseSessionId
from weblab.db import db
import weblab.admin.web as web
import weblab.admin.web.admin_views as admin_views
import weblab.admin.web.profile_views as profile_views
import weblab.admin.web.instructor_views as instructor_views
from weblab.core.wl import weblab_api
class BackView(BaseView):
@expose()
def index(self):
return redirect(request.url.split('/weblab/administration')[0] + '/weblab/client')
class RedirectView(BaseView):
def __init__(self, url_token, *args, **kwargs):
self.url_token = url_token
super(RedirectView, self).__init__(*args, **kwargs)
@expose()
def index(self):
return redirect(url_for(self.url_token))
GLOBAL_APP_INSTANCE = None
class AdministrationApplication(object):
def __init__(self, app, cfg_manager, ups, bypass_authz = False):
super(AdministrationApplication, self).__init__()
import weblab.admin.web.app as app_module
app_module.GLOBAL_APP_INSTANCE = self
self.cfg_manager = cfg_manager
db.initialize(cfg_manager)
self.ups = ups
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=db.engine))
files_directory = cfg_manager.get_doc_value(configuration_doc.CORE_STORE_STUDENTS_PROGRAMS_PATH)
core_server_url = cfg_manager.get_value( 'core_server_url', '' )
self.script_name = urlparse.urlparse(core_server_url).path.split('/weblab')[0] or ''
self.app = app
static_folder = os.path.abspath(os.path.join(os.path.dirname(web.__file__), 'static'))
################################################
#
# Administration panel for administrators
#
#
admin_url = '/weblab/administration/admin'
        self.admin = Admin(index_view = admin_views.HomeView(db_session, url = admin_url), name = 'WebLab-Deusto Admin', url = admin_url, endpoint = admin_url, base_template = 'weblab-master.html')
self.admin.add_view(admin_views.UsersAddingView(db_session, category = 'General', name = 'Add multiple users', endpoint = 'general/multiple/users'))
self.admin.add_view(admin_views.UsersPanel(db_session, category = 'General', name = 'Users', endpoint = 'general/users'))
self.admin.add_view(admin_views.GroupsPanel(db_session, category = 'General', name = 'Groups', endpoint = 'general/groups'))
self.admin.add_view(admin_views.AuthsPanel(db_session, category = 'General', name = 'Authentication', endpoint = 'general/auth'))
self.admin.add_view(admin_views.UserUsedExperimentPanel(files_directory, db_session, category = 'Logs', name = 'User logs', endpoint = 'logs/users'))
self.admin.add_view(admin_views.ExperimentCategoryPanel(db_session, category = 'Experiments', name = 'Categories', endpoint = 'experiments/categories'))
self.admin.add_view(admin_views.ExperimentPanel(db_session, category = 'Experiments', name = 'Experiments', endpoint = 'experiments/experiments'))
# TODO: Until finished, do not display
# self.admin.add_view(admin_views.SchedulerPanel(db_session, category = 'Experiments', name = 'Schedulers', endpoint = 'experiments/schedulers'))
self.admin.add_view(admin_views.PermissionsAddingView(db_session, category = 'Permissions', name = 'Create', endpoint = 'permissions/create'))
self.admin.add_view(admin_views.UserPermissionPanel(db_session, category = 'Permissions', name = 'User', endpoint = 'permissions/user'))
self.admin.add_view(admin_views.GroupPermissionPanel(db_session, category = 'Permissions', name = 'Group', endpoint = 'permissions/group'))
self.admin.add_view(admin_views.RolePermissionPanel(db_session, category = 'Permissions', name = 'Roles', endpoint = 'permissions/role'))
self.admin.add_view(RedirectView('instructor.index', url = 'instructor', name = 'Instructor panel', endpoint = 'instructor/admin'))
self.admin.add_view(admin_views.MyProfileView(url = 'myprofile', name = 'My profile', endpoint = 'myprofile/admin'))
self.admin.add_view(BackView(url = 'back', name = 'Back', endpoint = 'back/admin'))
self.admin.init_app(self.app)
self.full_admin_url = self.script_name + admin_url
################################################
#
# Profile panel
#
profile_url = '/weblab/administration/profile'
        self.profile = Admin(index_view = profile_views.ProfileHomeView(db_session, url = profile_url, endpoint = 'profile'), name = 'WebLab-Deusto profile', url = profile_url, endpoint = profile_url, base_template = 'weblab-master.html')
self.profile.add_view(profile_views.ProfileEditView(db_session, name = 'Edit', endpoint = 'edit'))
self.profile.add_view(profile_views.MyAccessesPanel(files_directory, db_session, name = 'My accesses', endpoint = 'accesses'))
self.profile.add_view(BackView(url = 'back', name = 'Back', endpoint = 'back/profile'))
self.profile.init_app(self.app)
################################################
#
# Instructors panel
#
        # TODO: there should be a new M2M relation between instructors and groups.
#
# Instructor should be able to:
#
# a) Create new groups (of which they are in charge)
# b) Make other instructors in charge of these groups
# c) Add students (and only students) to the system; forcing a group
# d) Edit users (only students; of those groups that the administrator is in charge of)
# e) Assign permissions on these courses
# f) Manage the permissions on these courses
# g) See the logs of their own students
# h) See a panel with analytics of each of these groups (this panel is common to the administrator, and has not been implemented)
instructor_url = '/weblab/administration/instructor'
instructor_home = instructor_views.InstructorHomeView(db_session, url = instructor_url, endpoint = 'instructor')
instructor_home.static_folder = static_folder
self.instructor = Admin(index_view = instructor_home, name = "Weblab-Deusto instructor", url = instructor_url, endpoint = instructor_url, base_template = 'weblab-master.html')
self.instructor.add_view(instructor_views.UsersPanel(db_session, category = 'General', name = 'Users', endpoint = 'users'))
self.instructor.add_view(instructor_views.GroupsPanel(db_session, category = 'General', name = 'Groups', endpoint = 'groups'))
self.instructor.add_view(instructor_views.UserUsedExperimentPanel(db_session, category = 'General', name = 'Raw accesses', endpoint = 'logs'))
self.instructor.add_view(instructor_views.GroupStats(db_session, category = 'Stats', name = 'Group', endpoint = 'stats/groups'))
self.instructor.add_view(BackView(url = 'back', name = 'Back', endpoint = 'back/instructor'))
self.instructor.init_app(self.app)
################################################
#
# Other
#
self.bypass_authz = bypass_authz
def is_admin(self):
if self.bypass_authz:
return True
try:
session_id = (request.cookies.get('weblabsessionid') or '').split('.')[0]
with weblab_api(self.ups, session_id = session_id):
try:
permissions = weblab.core.server.get_user_permissions()
except SessionNotFoundError:
# Gotcha
return False
admin_permissions = [ permission for permission in permissions if permission.name == 'admin_panel_access' ]
if len(admin_permissions) == 0:
return False
if admin_permissions[0].parameters[0].value:
return True
return False
except:
traceback.print_exc()
return False
def get_user_role(self):
if self.bypass_authz:
return 'admin'
try:
session_id = (request.cookies.get('weblabsessionid') or '').split('.')[0]
try:
with weblab_api(self.ups, session_id = session_id):
user_info = weblab.core.server.get_user_information()
except SessionNotFoundError:
# Gotcha
traceback.print_exc()
return None
else:
return user_info.role.name
except:
traceback.print_exc()
return None
def _reserve_fake_session(self):
fake_names = ('student1', 'porduna', 'user7', 'admin')
exc = None
for fake_name in fake_names:
try:
session_id, route = self.ups._reserve_session(ValidDatabaseSessionId(fake_name, 'administrator'))
except Exception as exc:
pass
else:
return session_id, route
raise exc
def get_permissions(self):
if self.bypass_authz:
session_id, _ = self._reserve_fake_session()
with weblab_api(self.ups, session_id = session_id.id):
return weblab.core.server.get_user_permissions()
session_id = (request.cookies.get('weblabsessionid') or '').split('.')[0]
try:
with weblab_api(self.ups, session_id = session_id):
return weblab.core.server.get_user_permissions()
except:
traceback.print_exc()
return None
def get_user_information(self):
if self.bypass_authz:
session_id, _ = self._reserve_fake_session()
with weblab_api(self.ups, session_id = session_id.id):
return weblab.core.server.get_user_information()
session_id = (request.cookies.get('weblabsessionid') or '').split('.')[0]
try:
with weblab_api(self.ups, session_id = session_id):
return weblab.core.server.get_user_information()
except SessionNotFoundError:
return None
#############################################
#
# The code below is only used for testing
#
if __name__ == '__main__':
from voodoo.configuration import ConfigurationManager
from weblab.core.server import UserProcessingServer
cfg_manager = ConfigurationManager()
cfg_manager.append_path('test/unit/configuration.py')
ups = UserProcessingServer(None, None, cfg_manager, dont_start = True)
app = Flask('weblab.core.server')
app.config['SECRET_KEY'] = os.urandom(32)
@app.route("/site-map")
def site_map():
lines = []
for rule in app.url_map.iter_rules():
line = str(escape(repr(rule)))
lines.append(line)
ret = "<br>".join(lines)
return ret
DEBUG = True
admin_app = AdministrationApplication(app, cfg_manager, ups, bypass_authz = True)
@admin_app.app.route('/')
def index():
return redirect('/weblab/administration/admin')
admin_app.app.run(debug=True, host = '0.0.0.0')
| 42.30292 | 237 | 0.646277 | 1,315 | 11,591 | 5.488213 | 0.198479 | 0.022308 | 0.037689 | 0.033255 | 0.366357 | 0.285576 | 0.200499 | 0.15893 | 0.140917 | 0.086601 | 0 | 0.002129 | 0.230092 | 11,591 | 273 | 238 | 42.457875 | 0.806589 | 0.081615 | 0 | 0.321637 | 0 | 0 | 0.112815 | 0.022563 | 0 | 0 | 0 | 0.003663 | 0 | 1 | 0.064327 | false | 0.046784 | 0.116959 | 0.017544 | 0.321637 | 0.023392 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ade1838beeb0bdf818f8a494b2320080e756504 | 2,142 | py | Python | medium/698_partition_to_k_equal_sum_subsets.py | niki4/leetcode_py3 | 794f560a09a8950da21bd58ea222e0c74449ffa6 | [
"MIT"
] | null | null | null | medium/698_partition_to_k_equal_sum_subsets.py | niki4/leetcode_py3 | 794f560a09a8950da21bd58ea222e0c74449ffa6 | [
"MIT"
] | null | null | null | medium/698_partition_to_k_equal_sum_subsets.py | niki4/leetcode_py3 | 794f560a09a8950da21bd58ea222e0c74449ffa6 | [
"MIT"
] | null | null | null | """
Given an array of integers nums and a positive integer k, find whether it's possible to divide this array into
k non-empty subsets whose sums are all equal.
Example 1:
Input: nums = [4, 3, 2, 3, 5, 2, 1], k = 4
Output: True
Explanation: It's possible to divide it into 4 subsets (5), (1, 4), (2,3), (2,3) with equal sums.
Note:
1 <= k <= len(nums) <= 16.
0 < nums[i] < 10000.
"""
from typing import List


class Solution:
    """
    Dynamic Programming / DFS with backtracking
    Runtime: 48 ms, faster than 77.03% of Python3
    Memory Usage: 14.1 MB, less than 98.53% of Python3

    Time complexity: O(k ^ n)
    Every element in the nums array must be placed in exactly one bucket, and
    each element has k candidate buckets, so the search space is
    k * k * ... * k, i.e., k^n. However, pruning unsolvable combinations by
    stopping at the first empty bucket (putting the same element into another,
    equally empty bucket is redundant) improves the performance considerably.
    """
    def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:
        total = sum(nums)
        if total % k:  # a total not divisible by k can never split evenly
            return False
        bucket, k_sum = [0] * k, total // k
        nums.sort(reverse=True)  # starting from larger nums first speeds up the algorithm

        def dfs(idx):
            if idx == len(nums):
                return len(set(bucket)) == 1
            for i in range(k):
                bucket[i] += nums[idx]
                if bucket[i] <= k_sum and dfs(idx + 1):
                    return True
                bucket[i] -= nums[idx]
                """
                The key is that bucket[i] == 0 implies bucket[j] == 0 for all
                j > i, because this algorithm always fills the previous buckets
                before trying the next. So if putting nums[idx] in this empty
                bucket can't solve the game, putting it in any other empty
                bucket can't solve it either.
                """
                if bucket[i] == 0:
                    break
            return False

        return dfs(0)
if __name__ == '__main__':
    sol = Solution()
    res = sol.canPartitionKSubsets([4, 3, 2, 3, 5, 2, 1], 4)
    assert res is True, f"Expected {True}, got {res}"
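    # An extra negative case (not in the original tests): 1 + 2 + 3 + 5 = 11
    # cannot be split into 2 equal-sum subsets, so the result must be False.
    res = sol.canPartitionKSubsets([1, 2, 3, 5], 2)
    assert res is False, f"Expected {False}, got {res}"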
| 36.305085 | 119 | 0.578898 | 324 | 2,142 | 3.796296 | 0.475309 | 0.028455 | 0.007317 | 0.021138 | 0.068293 | 0.011382 | 0.011382 | 0 | 0 | 0 | 0 | 0.038961 | 0.316993 | 2,142 | 58 | 120 | 36.931034 | 0.801777 | 0.440243 | 0 | 0 | 0 | 0 | 0.043093 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.095238 | false | 0 | 0.047619 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2adef42eb180cf29e8e8fba70e55404d7fde9b18 | 769 | py | Python | Calls/calls.py | vinodnimbalkar/python-playground | f3e37026017f7d0db1a9b7fb6f938b254bf735fc | [
"MIT"
] | 1 | 2018-12-11T22:56:08.000Z | 2018-12-11T22:56:08.000Z | Calls/calls.py | vinodnimbalkar/python-playground | f3e37026017f7d0db1a9b7fb6f938b254bf735fc | [
"MIT"
] | null | null | null | Calls/calls.py | vinodnimbalkar/python-playground | f3e37026017f7d0db1a9b7fb6f938b254bf735fc | [
"MIT"
] | null | null | null | import os
import csv
basedir = os.path.abspath(os.path.dirname(__file__))
path = basedir + "/CallRecorders/"
lst = os.listdir(path)
with open("call_data.csv", "w", newline="") as new_file:  # newline="" avoids blank rows on Windows
    fieldnames = ['Mobile No', 'Year', 'Month', 'Date', 'Hour', 'Minute', 'Second', 'Call Type', 'Audio']
    csv_writer = csv.DictWriter(new_file, fieldnames=fieldnames, delimiter=",")
    csv_writer.writeheader()
    for i in lst:
        # Each recording is assumed to be named
        # <mobile>_<year>_<month>_<date>_<hour>_<minute>_<second>_<call type>
        filelink = '=HYPERLINK("' + path + i + '")'
        csv_writer.writerow({fieldnames[0]: i.split('_')[0], fieldnames[1]: i.split('_')[1], fieldnames[2]: i.split('_')[2],
                             fieldnames[3]: i.split('_')[3], fieldnames[4]: i.split('_')[4], fieldnames[5]: i.split('_')[5], fieldnames[6]: i.split('_')[6],
fieldnames[7]:i.split('_')[7],fieldnames[8]:filelink}) | 51.266667 | 129 | 0.637191 | 106 | 769 | 4.45283 | 0.462264 | 0.101695 | 0.072034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02526 | 0.124837 | 769 | 15 | 130 | 51.266667 | 0.676077 | 0 | 0 | 0 | 0 | 0 | 0.135065 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2adfe7f8166fdf5dab39fdca843d86e6cad3463a | 4,837 | py | Python | src/games/views.py | vinicius91/django-rest-framework-api | c3fc22eec083c5dac49798cbe89ddc20eb967247 | [
"MIT"
] | 10 | 2019-07-30T17:20:23.000Z | 2021-11-08T13:10:50.000Z | restful_python_section_08/gamesapi/games/views.py | hackeziah/Building-RESTful-Python-Web-Services-with-Django | d795910a09000f07b962a7edad287df0fed2a362 | [
"MIT"
] | null | null | null | restful_python_section_08/gamesapi/games/views.py | hackeziah/Building-RESTful-Python-Web-Services-with-Django | d795910a09000f07b962a7edad287df0fed2a362 | [
"MIT"
] | 4 | 2019-05-19T11:36:31.000Z | 2021-07-13T01:04:56.000Z | from games.models import GameCategory
from games.models import Game
from games.models import Player
from games.models import PlayerScore
from games.serializers import GameCategorySerializer
from games.serializers import GameSerializer
from games.serializers import PlayerSerializer
from games.serializers import PlayerScoreSerializer
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.reverse import reverse
from django.contrib.auth.models import User
from games.serializers import UserSerializer
from rest_framework import permissions
from games.permissions import IsOwnerOrReadOnly
from rest_framework.throttling import ScopedRateThrottle
from rest_framework import filters
from django_filters import NumberFilter, DateTimeFilter, AllValuesFilter
class UserList(generics.ListAPIView):
    queryset = User.objects.all()
    serializer_class = UserSerializer
    name = 'user-list'


class UserDetail(generics.RetrieveAPIView):
    queryset = User.objects.all()
    serializer_class = UserSerializer
    name = 'user-detail'


class GameCategoryList(generics.ListCreateAPIView):
    queryset = GameCategory.objects.all()
    serializer_class = GameCategorySerializer
    name = 'gamecategory-list'
    throttle_scope = 'game-categories'
    throttle_classes = (ScopedRateThrottle,)
    filter_fields = ('name',)
    search_fields = ('^name',)
    ordering_fields = ('name',)


class GameCategoryDetail(generics.RetrieveUpdateDestroyAPIView):
    queryset = GameCategory.objects.all()
    serializer_class = GameCategorySerializer
    name = 'gamecategory-detail'
    throttle_scope = 'game-categories'
    throttle_classes = (ScopedRateThrottle,)


class GameList(generics.ListCreateAPIView):
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    name = 'game-list'
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly,
    )
    filter_fields = (
        'name',
        'game_category',
        'release_date',
        'played',
        'owner',
    )
    search_fields = (
        '^name',
    )
    ordering_fields = (
        'name',
        'release_date',
    )

    def perform_create(self, serializer):
        serializer.save(owner=self.request.user)


class GameDetail(generics.RetrieveUpdateDestroyAPIView):
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    name = 'game-detail'
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly)


class PlayerList(generics.ListCreateAPIView):
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
    name = 'player-list'
    filter_fields = (
        'name',
        'gender',
    )
    search_fields = (
        '^name',
    )
    ordering_fields = (
        'name',
    )


class PlayerDetail(generics.RetrieveUpdateDestroyAPIView):
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
    name = 'player-detail'
class PlayerScoreFilter(filters.FilterSet):
    min_score = NumberFilter(
        name='score', lookup_expr='gte')
    max_score = NumberFilter(
        name='score', lookup_expr='lte')
    from_score_date = DateTimeFilter(
        name='score_date', lookup_expr='gte')
    to_score_date = DateTimeFilter(
        name='score_date', lookup_expr='lte')
    player_name = AllValuesFilter(
        name='player__name')
    game_name = AllValuesFilter(
        name='game__name')

    class Meta:
        model = PlayerScore
        fields = (
            'score',
            'from_score_date',
            'to_score_date',
            'min_score',
            'max_score',
            # player__name will be accessed as player_name
            'player_name',
            # game__name will be accessed as game_name
            'game_name',
        )
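
# Illustrative queries enabled by PlayerScoreFilter (a sketch; actual paths
# depend on the project's URL configuration):
#   GET /player-scores/?min_score=100&max_score=200
#   GET /player-scores/?player_name=Kevin&ordering=-score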
class PlayerScoreList(generics.ListCreateAPIView):
    queryset = PlayerScore.objects.all()
    serializer_class = PlayerScoreSerializer
    name = 'playerscore-list'
    filter_class = PlayerScoreFilter
    ordering_fields = (
        'score',
        'score_date',
    )


class PlayerScoreDetail(generics.RetrieveUpdateDestroyAPIView):
    queryset = PlayerScore.objects.all()
    serializer_class = PlayerScoreSerializer
    name = 'playerscore-detail'


class ApiRoot(generics.GenericAPIView):
    name = 'api-root'

    def get(self, request, *args, **kwargs):
        return Response({
            'players': reverse(PlayerList.name, request=request),
            'game-categories': reverse(GameCategoryList.name, request=request),
            'games': reverse(GameList.name, request=request),
            'scores': reverse(PlayerScoreList.name, request=request),
            'users': reverse(UserList.name, request=request),
        })
| 29.138554 | 79 | 0.686583 | 450 | 4,837 | 7.222222 | 0.226667 | 0.027692 | 0.061538 | 0.076923 | 0.402462 | 0.334769 | 0.302154 | 0.241231 | 0.212923 | 0 | 0 | 0 | 0.224313 | 4,837 | 165 | 80 | 29.315152 | 0.866205 | 0.017366 | 0 | 0.328467 | 0 | 0 | 0.095348 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014599 | false | 0 | 0.131387 | 0.007299 | 0.642336 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ae2972237ded6e1dad156e50a3be71c2a81b789 | 2,096 | py | Python | Py Apple Dynamics V7.3 SRC/PA-Dynamics V7.3/PA_STABLIZE.py | musen142/py-apple-dynamics | 95f831ecf9c9167e9709c63deabc989eda6bf669 | [
"Apache-2.0"
] | 1 | 2022-01-18T11:47:29.000Z | 2022-01-18T11:47:29.000Z | Py Apple Dynamics V7.3 SRC/PA-Dynamics V7.3/PA_STABLIZE.py | musen142/py-apple-dynamics | 95f831ecf9c9167e9709c63deabc989eda6bf669 | [
"Apache-2.0"
] | null | null | null | Py Apple Dynamics V7.3 SRC/PA-Dynamics V7.3/PA_STABLIZE.py | musen142/py-apple-dynamics | 95f831ecf9c9167e9709c63deabc989eda6bf669 | [
"Apache-2.0"
] | null | null | null | import PA_IMU
from machine import SoftI2C, Pin
import padog
# intermediate variable definitions
q=[]
Sta_Pitch=0
Sta_Roll=0
kp_sta=0.05
p_origin=0
r_origin=0
time_p=0
filter_data_p=0
filter_data_r=0
gyro_cal_sta=0
gyro_x_fitted=0
gyro_y_fitted=0
acc_z_fitted=0

# set up the gyroscope I2C interface
print("IMU starting...")
i2cc = SoftI2C(scl=Pin(22), sda=Pin(21))  # integrated board
acc = PA_IMU.accel(i2cc)
acc.error_gy()
def get_imu_value():
    global q, filter_data_p, filter_data_r, time_p, p_origin, r_origin, gyro_cal_sta, gyro_x_fitted, gyro_y_fitted, acc_z_fitted
    ay = acc.get_values()
    if time_p <= 199:
        # calibration phase: capture the resting pitch/roll as the origin
        padog.alarm(50, 300, 500)
        if padog.PIT_goal != 0 or padog.ROL_goal != 0:
            padog.PIT_goal = 0
            padog.ROL_goal = 0
        else:
            try:
                p_origin = round(q[1])
                r_origin = round(q[2])
                time_p = time_p + 1
                gyro_cal_sta = 0
            except:
                time_p = 0
                gyro_cal_sta = 0
    elif time_p == 200:
        padog.alarm(0, 0, 0)
        time_p = time_p + 1
    else:
        try:
            filter_data_p = q[1]
            filter_data_r = q[2]
            gyro_x_fitted = ay["GyX"]/65.5*0.0174533  # pitch angular velocity (rad/s)
            gyro_y_fitted = ay["GyY"]/65.5*0.0174533  # roll angular velocity (rad/s)
            acc_z_fitted = ay["AcZ"]/8192             # Z-axis acceleration (g)
            gyro_cal_sta = 1
        except:
            gyro_cal_sta = 1
    q = PA_IMU.IMUupdate(ay["GyX"]/65.5*0.0174533, ay["GyY"]/65.5*0.0174533, ay["GyZ"]/65.5*0.0174533, ay["AcX"]/8192, ay["AcY"]/8192, ay["AcZ"]/8192)
def stab():
    global Sta_Pitch, Sta_Roll
    global p_origin, r_origin
    get_imu_value()
    # PD correction on pitch, clamped to the maximum allowed angle
    Sta_Pitch = Sta_Pitch - (filter_data_p - p_origin)*padog.pit_Kp_G - gyro_x_fitted*padog.pit_Kd_G
    if Sta_Pitch >= padog.pit_max_ang:
        Sta_Pitch = padog.pit_max_ang
    elif Sta_Pitch <= -padog.pit_max_ang:
        Sta_Pitch = -padog.pit_max_ang
    # PD correction on roll, clamped to the maximum allowed angle
    Sta_Roll = Sta_Roll - (filter_data_r - r_origin)*padog.rol_Kp_G - gyro_y_fitted*padog.rol_Kd_G
    if Sta_Roll >= padog.rol_max_ang:
        Sta_Roll = padog.rol_max_ang
    elif Sta_Roll <= -padog.rol_max_ang:
        Sta_Roll = -padog.rol_max_ang
    if gyro_cal_sta == 1:
        padog.PIT_goal = Sta_Pitch
        padog.ROL_goal = Sta_Roll
        padog.acc_z = acc_z_fitted
    else:  # reset to prevent jumping and jitter
        Sta_Pitch = 0
        Sta_Roll = 0
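
# Usage sketch (an assumption about the surrounding firmware, not shown in
# this file): call stab() from the main control loop at a fixed rate so the
# proportional/derivative terms above see a roughly constant time step.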
| 18.714286 | 141 | 0.677481 | 396 | 2,096 | 3.247475 | 0.217172 | 0.062208 | 0.054432 | 0.042768 | 0.269051 | 0.212286 | 0.136081 | 0.133748 | 0.133748 | 0.133748 | 0 | 0.076145 | 0.197996 | 2,096 | 111 | 142 | 18.882883 | 0.688876 | 0.022424 | 0 | 0.277778 | 0 | 0 | 0.017822 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.041667 | 0 | 0.069444 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ae34fdb409fbf7d4b21e8249daa9e8cda1a588d | 4,559 | py | Python | httpie/manager/cli.py | HenryGessau/httpie | 85ba9ad8eaa718d7f9dbcb7129168d6a877f3d30 | [
"BSD-3-Clause"
] | 2 | 2022-01-31T18:18:58.000Z | 2022-01-31T18:26:35.000Z | httpie/manager/cli.py | isidentical/httpie | 85ba9ad8eaa718d7f9dbcb7129168d6a877f3d30 | [
"BSD-3-Clause"
] | 2 | 2022-03-05T19:16:08.000Z | 2022-03-05T19:16:09.000Z | httpie/manager/cli.py | isidentical/httpie | 85ba9ad8eaa718d7f9dbcb7129168d6a877f3d30 | [
"BSD-3-Clause"
] | null | null | null | from textwrap import dedent
from httpie.cli.argparser import HTTPieManagerArgumentParser
from httpie import __version__
CLI_SESSION_UPGRADE_FLAGS = [
    {
        'flags': ['--bind-cookies'],
        'action': 'store_true',
        'default': False,
        'help': 'Bind domainless cookies to the host that session belongs.'
    }
]

COMMANDS = {
    'cli': {
        'help': 'Manage HTTPie for Terminal',
        'export-args': [
            'Export available options for the CLI',
            {
                'flags': ['-f', '--format'],
                'choices': ['json'],
                'default': 'json'
            }
        ],
        'sessions': {
            'help': 'Manage HTTPie sessions',
            'upgrade': [
                'Upgrade the given HTTPie session with the latest '
                'layout. A list of changes between different session versions '
                'can be found in the official documentation.',
                {
                    'dest': 'hostname',
                    'metavar': 'HOSTNAME',
                    'help': 'The host this session belongs.'
                },
                {
                    'dest': 'session',
                    'metavar': 'SESSION_NAME_OR_PATH',
                    'help': 'The name or the path for the session that will be upgraded.'
                },
                *CLI_SESSION_UPGRADE_FLAGS
            ],
            'upgrade-all': [
                'Upgrade all named sessions with the latest layout. A list of '
                'changes between different session versions can be found in the official '
                'documentation.',
                *CLI_SESSION_UPGRADE_FLAGS
            ],
        }
    }
}
COMMANDS['plugins'] = COMMANDS['cli']['plugins'] = {
    'help': 'Manage HTTPie plugins.',
    'install': [
        'Install the given targets from PyPI '
        'or from local paths.',
        {
            'dest': 'targets',
            'nargs': '+',
            'help': 'targets to install'
        }
    ],
    'upgrade': [
        'Upgrade the given plugins',
        {
            'dest': 'targets',
            'nargs': '+',
            'help': 'targets to upgrade'
        }
    ],
    'uninstall': [
        'Uninstall the given HTTPie plugins.',
        {
            'dest': 'targets',
            'nargs': '+',
            'help': 'targets to uninstall'
        }
    ],
    'list': [
        'List all installed HTTPie plugins.'
    ],
}
def missing_subcommand(*args) -> str:
    base = COMMANDS
    for arg in args:
        base = base[arg]

    assert isinstance(base, dict)
    subcommands = ', '.join(map(repr, base.keys()))
    return f'Please specify one of these: {subcommands}'


def generate_subparsers(root, parent_parser, definitions):
    action_dest = '_'.join(parent_parser.prog.split()[1:] + ['action'])
    actions = parent_parser.add_subparsers(
        dest=action_dest
    )
    for command, properties in definitions.items():
        is_subparser = isinstance(properties, dict)
        properties = properties.copy()

        descr = properties.pop('help', None) if is_subparser else properties.pop(0)
        command_parser = actions.add_parser(command, description=descr)
        command_parser.root = root
        if is_subparser:
            generate_subparsers(root, command_parser, properties)
            continue

        for argument in properties:
            argument = argument.copy()
            flags = argument.pop('flags', [])
            command_parser.add_argument(*flags, **argument)
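
# Illustrative invocations produced by the COMMANDS tree above (a sketch; the
# exact wording comes from the dicts, the routing from generate_subparsers):
#   httpie cli sessions upgrade HOSTNAME SESSION_NAME_OR_PATH [--bind-cookies]
#   httpie plugins install TARGET [TARGET ...]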
parser = HTTPieManagerArgumentParser(
    prog='httpie',
    description=dedent(
        '''
        Managing interface for the HTTPie itself. <https://httpie.io/docs#manager>

        Be aware that you might be looking for http/https commands for sending
        HTTP requests. This command is only available for managing the HTTPie
        plugins and the configuration around it.
        '''
    ),
)
parser.add_argument(
    '--debug',
    action='store_true',
    default=False,
    help='''
    Prints the exception traceback should one occur, as well as other
    information useful for debugging HTTPie itself and for reporting bugs.
    '''
)
parser.add_argument(
    '--traceback',
    action='store_true',
    default=False,
    help='''
    Prints the exception traceback should one occur.
    '''
)
parser.add_argument(
    '--version',
    action='version',
    version=__version__,
    help='''
    Show version and exit.
    '''
)
generate_subparsers(parser, parser, COMMANDS)
| 27.79878 | 90 | 0.551875 | 444 | 4,559 | 5.563063 | 0.353604 | 0.018219 | 0.02753 | 0.026721 | 0.198381 | 0.198381 | 0.18583 | 0.139271 | 0.139271 | 0.139271 | 0 | 0.00066 | 0.3356 | 4,559 | 163 | 91 | 27.969325 | 0.81479 | 0 | 0 | 0.235294 | 0 | 0 | 0.337644 | 0 | 0 | 0 | 0 | 0 | 0.007353 | 1 | 0.014706 | false | 0 | 0.022059 | 0 | 0.044118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ae43640e31a5c0da290f45092cd5664aabc7b21 | 7,857 | py | Python | server.py | CMU-IDS-2020/fp-lyric-visualization | 621a36308ba9d19c965ec6cdfd9e0c5f07069b90 | [
"BSD-3-Clause"
] | 1 | 2020-12-03T19:23:44.000Z | 2020-12-03T19:23:44.000Z | server.py | CMU-IDS-2020/fp-lyric-visualization | 621a36308ba9d19c965ec6cdfd9e0c5f07069b90 | [
"BSD-3-Clause"
] | null | null | null | server.py | CMU-IDS-2020/fp-lyric-visualization | 621a36308ba9d19c965ec6cdfd9e0c5f07069b90 | [
"BSD-3-Clause"
] | null | null | null | import http.server
import socketserver
import json
import os
import string
import pandas as pd
import numpy as np
import argparse
from urllib.parse import urlparse, parse_qs
from preprocessing.lyrics_preprocess import preprocess_lyrics
from preprocessing.dict_preprocess import preprocess_dict
from preprocessing.merge_dataframes import merge_dataframes, positivity_barplot_data
from preprocessing.tsne import tsne_list
from spotify_embedding import get_spotify_embedding
import lyricsgenius
genius = lyricsgenius.Genius("tub_dvzlNtK1D1lLS7o4YUqX2fGBnJdAVbW_OgjEjRKtfhyUopjvonY50UzhPlKe")
CACHE_DIR = 'cache'
HAND_PICKED_EXAMPLE_CACHE = 'hand_picked_example_cache'
if not os.path.exists(CACHE_DIR): os.mkdir(CACHE_DIR)
if not os.path.exists(HAND_PICKED_EXAMPLE_CACHE): os.mkdir(HAND_PICKED_EXAMPLE_CACHE)
def format_filename(s):
    """Take a string and return a valid filename constructed from the string.

    Uses a whitelist approach: any characters not present in valid_chars are
    removed. Also spaces are replaced with underscores.

    Note: this method may produce invalid filenames such as ``, `.` or `..`
    When I use this method I prepend a date string like '2009_01_15_19_46_32_'
    and append a file extension like '.txt', so I avoid the potential of using
    an invalid filename.

    FROM: https://gist.github.com/seanh/93666
    """
    valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
    filename = ''.join(c for c in s if c in valid_chars)
    filename = filename.replace(' ', '_')  # I don't like spaces in filenames.
    return filename
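
# Example (derived from the whitelist above): format_filename('AC/DC: Back In Black')
# drops '/' and ':' and maps spaces to underscores, giving 'ACDC_Back_In_Black'.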
def get_lyrics_df(artist_name, song_name, isHandPickedExample):
    lyrics_fn = os.path.join(HAND_PICKED_EXAMPLE_CACHE if isHandPickedExample else CACHE_DIR,
                             format_filename(artist_name + " " + song_name + " lyrics.csv"))
    lines_fn = os.path.join(HAND_PICKED_EXAMPLE_CACHE if isHandPickedExample else CACHE_DIR,
                            format_filename(artist_name + " " + song_name + " lines.csv"))

    if os.path.exists(lyrics_fn):
        # Use the cached file
        print('Using a cached file for the song: ', song_name)
        lyrics_df = pd.read_csv(lyrics_fn, encoding="utf-8")
        lines_df = pd.read_csv(lines_fn, encoding="utf-8")
    else:
        # Start from scratch
        lyrics_df, lines_df = preprocess_lyrics(artist_name, song_name)
        dict_df = preprocess_dict(lyrics_df)
        lyrics_df, lines_df = merge_dataframes(lyrics_df, lines_df, dict_df)

        # Add song name and artist name to the dataframes
        lyrics_df['song_name'] = song_name
        lines_df['song_name'] = song_name
        lyrics_df['artist_name'] = artist_name
        lines_df['artist_name'] = artist_name

        # Cache the files
        lyrics_df.to_csv(lyrics_fn, encoding="utf-8", index=False)
        lines_df.to_csv(lines_fn, encoding="utf-8", index=False)

    # NaN's will mess up the encoding. Convert them to empty strings.
    lyrics_df.replace(np.nan, '', regex=True, inplace=True)
    lines_df.replace(np.nan, '', regex=True, inplace=True)

    return lyrics_df, lines_df, lyrics_fn, lines_fn
def compute_tsne(df0, df1, column):
    '''Compute t-SNE using a specified column from two different dataframes.
    Alters the dataframes in place, so there is no need to return them.'''
    all_values_unique = list(set(list(df0[column].unique()) + list(df1[column].unique())))
    tsne_dict = tsne_list(all_values_unique)
    df0['tsne_x_combined'] = df0[column].apply(lambda x: tsne_dict[x][0])
    df0['tsne_y_combined'] = df0[column].apply(lambda x: tsne_dict[x][1])
    df1['tsne_x_combined'] = df1[column].apply(lambda x: tsne_dict[x][0])
    df1['tsne_y_combined'] = df1[column].apply(lambda x: tsne_dict[x][1])
class MyRequestHandler(http.server.SimpleHTTPRequestHandler):

    def _set_headers(self):
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    def do_HEAD(self):
        self._set_headers()

    def do_GET(self):
        possible_name = self.path.strip("/") + '.html'
        if self.path == '/':
            self.path = '/index.html'
        elif self.path.startswith('/getSong'):
            query = urlparse(self.path).query
            query_components = parse_qs(query)
            artist_name0, song_name0 = query_components["artist0"][0].lower().strip(), query_components["songName0"][0].lower().strip()
            artist_name1, song_name1 = query_components["artist1"][0].lower().strip(), query_components["songName1"][0].lower().strip()

            # Get genius results in case the user did not type something correctly.
            # Helps with consistency in file names.
            song = genius.search_song(song_name0, artist_name0)
            song_name0, artist_name0 = song.title, song.artist
            song = genius.search_song(song_name1, artist_name1)
            song_name1, artist_name1 = song.title, song.artist

            # Check if this is a pre-selected pairing of songs. If so, we might be able to avoid computing t-SNE.
            isHandPickedExample = query_components["isHandPickedExample"][0].lower().strip() == 'true'

            lyrics_df0, lines_df0, lyrics_fn0, lines_fn0 = get_lyrics_df(artist_name0, song_name0, isHandPickedExample)
            lyrics_df1, lines_df1, lyrics_fn1, lines_fn1 = get_lyrics_df(artist_name1, song_name1, isHandPickedExample)

            # Positivity barplot data
            pos_barplot_data = positivity_barplot_data(lyrics_df0, lyrics_df1)

            if isHandPickedExample and ('tsne_x_combined' in lyrics_df0.columns) and ('tsne_x_combined' in lyrics_df1.columns):
                # Don't need to do anything, the cached file is good to go
                print('Using cached version of a pre-selected example as is. No need to compute t-SNE.')
            else:
                # Compute t-SNE because it wasn't saved for this song comparison combination
                print('Computing t-SNE.')
                # Do the t-SNE for the words combined
                compute_tsne(lyrics_df0, lyrics_df1, 'word_can_search')
                # Do the t-SNE for the lines combined
                compute_tsne(lines_df0, lines_df1, 'line_classified')

                if isHandPickedExample:
                    # Save this pre-selected pairing again to keep the t-SNE values
                    lyrics_df0.to_csv(lyrics_fn0, encoding="utf-8", index=False)
                    lines_df0.to_csv(lines_fn0, encoding="utf-8", index=False)
                    lyrics_df1.to_csv(lyrics_fn1, encoding="utf-8", index=False)
                    lines_df1.to_csv(lines_fn1, encoding="utf-8", index=False)

            # Convert to dictionaries
            lyrics0, lyrics1, lines0, lines1 = lyrics_df0.to_dict('records'), lyrics_df1.to_dict('records'), \
                lines_df0.to_dict('records'), lines_df1.to_dict('records')

            output_json = json.dumps({
                'lyrics0': lyrics0,
                'lines0': lines0,
                'lyrics1': lyrics1,
                'lines1': lines1,
                'pos_barplot_data': pos_barplot_data,
                'song0_html_embedding': get_spotify_embedding(song_name0, artist_name0),
                'song1_html_embedding': get_spotify_embedding(song_name1, artist_name1)})

            self._set_headers()
            self.wfile.write(output_json.encode())
            return

        return http.server.SimpleHTTPRequestHandler.do_GET(self)
parser = argparse.ArgumentParser(description='Server Parameters')
parser.add_argument('--port', type=int, default=8081, help='port to serve on (default: 8081)')
args = parser.parse_args()

Handler = MyRequestHandler
httpd = socketserver.TCPServer(("", args.port), Handler)
print("serving at port", args.port)
httpd.serve_forever() | 46.491124 | 135 | 0.67723 | 1,048 | 7,857 | 4.848282 | 0.271947 | 0.020468 | 0.018894 | 0.025979 | 0.239323 | 0.161976 | 0.094076 | 0.083055 | 0.068884 | 0.038969 | 0 | 0.020994 | 0.224004 | 7,857 | 169 | 136 | 46.491124 | 0.812367 | 0.167367 | 0 | 0.036364 | 0 | 0 | 0.116602 | 0.013745 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054545 | false | 0 | 0.136364 | 0 | 0.227273 | 0.036364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aeb7836f2091566f92e51e2a1bbbaab9846250b | 2,451 | py | Python | training/sklearn_training.py | dida-do/dida-tools | 95979e076cb7e7707d50e34b1f2e16d77c95bf9d | [
"Apache-2.0"
] | 1 | 2021-04-14T15:47:54.000Z | 2021-04-14T15:47:54.000Z | training/sklearn_training.py | dida-do/dida-tools | 95979e076cb7e7707d50e34b1f2e16d77c95bf9d | [
"Apache-2.0"
] | null | null | null | training/sklearn_training.py | dida-do/dida-tools | 95979e076cb7e7707d50e34b1f2e16d77c95bf9d | [
"Apache-2.0"
] | null | null | null | import os
import sys
from datetime import datetime
import joblib
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, multilabel_confusion_matrix
from config.config import global_config
from utils.path import create_dirs
from utils.logging.csvinterface import write_log
from utils.logging.log import Log
train_config = {
    "DATE": datetime.now().strftime("%Y%m%d-%H%M%S"),
    "SESSION_NAME": "sklearn_training-run",
    "ROUTINE_NAME": sys.modules[__name__],
    "MODEL": SVC,
    "MODEL_CONFIG": {
        "C": 1.,
        "kernel": "rbf"
    },
    "LOSS": accuracy_score,
    "METRICS": {
        "accuracy": accuracy_score,
        "confusion_matrix": multilabel_confusion_matrix
    },
    "LOGFILE": "sklearn_experiments.csv",
    "__COMMENT": None
}
def train(X_train, X_val, y_train, y_val, train_config: dict=train_config,
          global_config: dict=global_config, save_model: bool=True):

    # create paths if necessary
    for path in global_config.values():
        create_dirs(path)

    # model name and path
    name = "_".join([train_config["DATE"], train_config["SESSION_NAME"]])
    model_path = os.path.join(global_config["WEIGHT_DIR"], name)

    # instantiate model
    model = train_config["MODEL"](**train_config["MODEL_CONFIG"])

    # fit to training data
    model.fit(X_train, y_train)

    # dump model to disk
    if save_model:
        joblib.dump(model, model_path + ".joblib")

    # log metrics to csv
    train_predictions = model.predict(X_train)
    val_predictions = model.predict(X_val)

    log_content = train_config.copy()
    log_content["TRAIN_LOSS"] = train_config["LOSS"](y_train, train_predictions)
    log_content["VAL_LOSS"] = train_config["LOSS"](y_val, val_predictions)

    log_content["TRAIN_METRICS"] = {}
    log_content["VAL_METRICS"] = {}
    for key, metric in train_config["METRICS"].items():
        log_content["TRAIN_METRICS"][key] = metric(y_train, train_predictions)
        log_content["VAL_METRICS"][key] = metric(y_val, val_predictions)

    log_path = os.path.join(global_config["LOG_DIR"], train_config["LOGFILE"])
    write_log(log_path, log_content)

    # log metrics to mlflow
    logger = Log(train_config=train_config, run_name=train_config["SESSION_NAME"])
    logger.log_metric("Train Loss", log_content["TRAIN_LOSS"])
    logger.log_metric("Validation Loss", log_content["VAL_LOSS"])

    # return validation loss
    return log_content["VAL_LOSS"]
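
# Minimal usage sketch (illustrative; assumes the project's global_config
# points WEIGHT_DIR and LOG_DIR at writable directories):
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split

    X, y = load_iris(return_X_y=True)
    X_tr, X_va, y_tr, y_va = train_test_split(X, y, random_state=0)
    # with the default config, LOSS is accuracy_score, so this prints accuracy
    print("validation 'loss':", train(X_tr, X_va, y_tr, y_va, save_model=False))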
| 31.831169 | 82 | 0.698082 | 328 | 2,451 | 4.920732 | 0.259146 | 0.10223 | 0.046468 | 0.031599 | 0.125155 | 0.075589 | 0.043371 | 0 | 0 | 0 | 0 | 0.000497 | 0.178703 | 2,451 | 76 | 83 | 32.25 | 0.801292 | 0.068135 | 0 | 0 | 0 | 0 | 0.163077 | 0.01011 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.188679 | 0 | 0.226415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aeba58f4ee17199bdf7918bb133d066173af0ab | 2,987 | py | Python | pyglet-hg/contrib/layout/tests/layout/HTML_BUILDER.py | sangh/LaserShow | abc95e465e3455dc220cc602dd58358c84666f29 | [
"BSD-3-Clause"
] | 21 | 2015-11-03T03:15:36.000Z | 2021-03-15T22:00:47.000Z | contrib/layout/tests/layout/HTML_BUILDER.py | seeminglee/pyglet64 | 3dd167b5b0d3ad132a157e404586e53c2bb21736 | [
"BSD-3-Clause"
] | 3 | 2017-09-14T14:08:28.000Z | 2019-05-20T04:38:15.000Z | contrib/layout/tests/layout/HTML_BUILDER.py | seeminglee/pyglet64 | 3dd167b5b0d3ad132a157e404586e53c2bb21736 | [
"BSD-3-Clause"
] | 23 | 2017-04-15T19:23:08.000Z | 2020-09-08T11:55:29.000Z | #!/usr/bin/env python
'''Test content tree construction from HTML source.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
from layout.content import *
from layout.builders.htmlbuilder import *
class HTMLBuilderTest(unittest.TestCase):
    def check(self, test, expected):
        document = Document()
        builder = HTMLBuilder(document)
        builder.feed(test)
        builder.close()

        result = self.canonical(document.root)
        self.assertEqual(result, expected,
                         'Result:\n%s\nExpected:\n%s\n' % (result, expected))

    def canonical(self, element):
        can = ''
        if not element.is_anonymous:
            can += '<%s>' % element.name
        for child in element.children:
            can += self.canonical(child)
        if element.text:
            can += element.text.strip()
        if not element.is_anonymous:
            can += '</%s>' % element.name
        return can
    def test_sanity(self):
        self.check(
            '<html><body><p>Hello</p></body></html>',
            '<html><body><p>Hello</p></body></html>')

    def test_noopen_html(self):
        self.check(
            '<head><title>Goodbye</title></head><body><p>Hello</p></body></html>',
            '<html><body><p>Hello</p></body></html>')

    def test_noopen_head(self):
        self.check(
            '<title>Goodbye</title></head><body><p>Hello</p></body></html>',
            '<html><body><p>Hello</p></body></html>')

    def test_noopen_body(self):
        self.check(
            '<title>Goodbye</title></head><p>Hello</p></body></html>',
            '<html><body><p>Hello</p></body></html>')

    def test_noclose_html(self):
        self.check(
            '<html><head><title>Goodbye</title></head><body><p>Hello</p></body>',
            '<html><body><p>Hello</p></body></html>')

    def test_noclose_head(self):
        self.check(
            '<html><head><title>Goodbye</title><body><p>Hello</p></body>',
            '<html><body><p>Hello</p></body></html>')

    def test_noclose_title(self):
        self.check(
            '<html><head><title>Goodbye</head><body><p>Hello</p></body>',
            '<html><body><p>Hello</p></body></html>')

    def test_noclose_any(self):
        self.check(
            '<html><head><title>Goodbye<body><p>Hello',
            '<html><body><p>Hello</p></body></html>')

    def test_minimal(self):
        self.check(
            '<title>Goodbye<p>Hello',
            '<html><body><p>Hello</p></body></html>')

    def test_zen(self):
        self.check(
            'Hello',
            '<html><body>Hello</body></html>')

    def test_p(self):
        self.check(
            '<p>Para1<p>Para2<p>Para3',
            '<html><body><p>Para1</p><p>Para2</p><p>Para3</p></body></html>')

    def test_nest_div_p(self):
        self.check(
            '<div><p>Para1<div><p>Para2',
            '<html><body><div><p>Para1<div><p>Para2</p></div></p></div></body></html>')

    def test_ul_li(self):
        self.check(
            '<ul><li>One<li>Two<li>Three</ul>',
            '<html><body><ul><li>One</li><li>Two</li><li>Three</li></ul></body></html>')
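
    def test_li_noclose(self):
        # An extra case in the spirit of the ones above; it assumes the
        # builder closes open <li> elements at end of input, as it does
        # for the unclosed <p> and <title> cases.
        self.check(
            '<ul><li>One<li>Two',
            '<html><body><ul><li>One</li><li>Two</li></ul></body></html>')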
if __name__ == '__main__':
    unittest.main()
| 28.721154 | 79 | 0.577503 | 407 | 2,987 | 4.137592 | 0.184275 | 0.095012 | 0.090855 | 0.104513 | 0.501781 | 0.477435 | 0.456057 | 0.38658 | 0.353919 | 0.308789 | 0 | 0.004168 | 0.196853 | 2,987 | 103 | 80 | 29 | 0.697791 | 0.0231 | 0 | 0.303797 | 0 | 0.101266 | 0.411542 | 0.397114 | 0 | 0 | 0 | 0 | 0.012658 | 1 | 0.189873 | false | 0 | 0.037975 | 0 | 0.253165 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aecaff16a8126f08dd1efa215ec7a91a6a713fb | 11,541 | py | Python | loss_analysis/analysis.py | nedles/Loss-Analysis | fc225c7e556f3a85f741008a1aaa5341a6ea54f4 | [
"MIT"
] | 2 | 2018-10-16T14:06:37.000Z | 2019-05-22T02:28:50.000Z | loss_analysis/analysis.py | nedles/Loss-Analysis | fc225c7e556f3a85f741008a1aaa5341a6ea54f4 | [
"MIT"
] | 2 | 2017-02-07T06:53:40.000Z | 2017-03-08T09:22:19.000Z | loss_analysis/analysis.py | nedles/Loss-Analysis | fc225c7e556f3a85f741008a1aaa5341a6ea54f4 | [
"MIT"
] | 2 | 2017-02-04T05:35:37.000Z | 2019-05-13T03:28:12.000Z | import numpy as np
from scipy import constants
from scipy.optimize import curve_fit
import os
from numpy.polynomial import polynomial as poly
from scipy.special import lambertw
# use absolute file path so tests work
path_const = os.path.join(os.path.dirname(__file__), '..', 'constants')
def AM15G_resample(wl):
    '''
    Returns the AM1.5G spectrum at the given wavelengths,
    scaled to the new data interval (assumes even data spacing).
    TODO: is this the best way?

    inputs:
        wavelength: (array like)
            the measured wavelengths in nanometers.

    outputs:
        current density per interval: (array like)
    '''
    interval = abs(wl[1] - wl[0])  # a ratio to 1 nm (default)
    AM15G_wl = np.genfromtxt(os.path.join(path_const, 'AM1.5G_spectrum.dat'),
                             usecols=(0,), skip_header=1)
    AM15G_Jph = np.genfromtxt(os.path.join(path_const, 'AM1.5G_spectrum.dat'),
                              usecols=(1,), skip_header=1)

    return interval * np.interp(wl, AM15G_wl, AM15G_Jph)
def find_nearest(x_val, xdata, ydata=None):
    '''
    Finds the nearest index in 'xdata' to 'x_val'.
    Returns the corresponding 'ydata' value if given.
    '''
    xdata = np.array(xdata)
    nearest = (np.abs(xdata - x_val)).argmin()

    if ydata is not None:
        ydata = np.array(ydata)
        assert xdata.shape[0] == ydata.shape[0]
        nearest = ydata[nearest]

    return nearest
def wl_to_alpha(wavelength):
    '''
    Returns the band to band absorption coefficient for silicon at a given
    wavelength. Linear interpolation is performed if the exact values are
    not provided.

    The values are taken from Green 2008
    DOI:10.1016/j.solmat.2008.06.009

    inputs:
        wavelength: (float)
            wavelength in nm

    outputs:
        alpha: (float)
            the band to band absorption coefficient
    '''
    alpha_data = np.genfromtxt(
        os.path.join(path_const, 'Si_alpha_Green_2008.dat'),
        usecols=(0, 1), skip_header=1).transpose()

    wl = alpha_data[0]
    alpha = alpha_data[1]

    return np.interp(wavelength, wl, alpha)
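
# Usage note (illustrative): wl_to_alpha(1000.) returns the interpolated
# absorption coefficient at 1000 nm; array-like inputs also work via np.interp.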
def fit_Basore(wavelength, IQE, theta=0, wlbounds=(1040, 1100)):
    '''
    Linear fit of IQE to extract an effective bulk diffusion length.
    This is just a linear fit over a limited wavelength range.

    Inputs:
        wavelength: (array like)
            the measured wavelengths in nanometers.
        IQE: (array like)
            the measured internal quantum efficiency in units %.
        theta: (float, optional)
            The average angle the light travels through the sample.
            This can be used to partially correct for textured surfaces.
            The default is 0, in units of degrees.
        wlbounds: (tuple, optional)
            The bounds between which the linear fit is performed.
            The first tuple entry should be the min and the second the max.
            The default is 1040 nm to 1100 nm.

    Returns:
        a tuple of
            a dictionary containing
                Leff: the effective diffusion length (cm)
                eta_c: the collection efficiency
            a plotting function

    See Basore 1993
    doi:10.1109/PVSC.1993.347063
    '''
    index = (wavelength >= wlbounds[0]) * (wavelength <= wlbounds[1])
    IQE = np.copy(IQE[index])
    wavelength = np.copy(wavelength[index])

    fit_params = ['Leff']

    alpha = wl_to_alpha(wavelength) / float(np.cos(np.radians(theta)))
    coefs = poly.polyfit(1. / alpha, 1. / IQE, 1)

    # TODO: check these calcs
    fit_output = {'Leff': coefs[1],
                  'eta_c': 1 / coefs[0]}

    def plot_Basore_fit(ax):
        ax.plot(1. / alpha, 1. / IQE, '-o', label='data')
        ax.plot(1. / alpha, poly.polyval(1. / alpha, coefs), label='fit_Basore')
        ax.set_xlabel('$1/ \\alpha$ [$cm$]')
        ax.set_ylabel('$1/IQE$ []')
        ax.grid(True)
        ax.legend(loc='best')

    return fit_output, plot_Basore_fit
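
# Example usage (hypothetical measured arrays 'wl' and 'iqe', and a matplotlib
# axes 'ax'; these names are not defined in this module):
#     fit, plot_fn = fit_Basore(wl, iqe)
#     print(fit['Leff'])   # effective diffusion length in cm
#     plot_fn(ax)          # overlay the data and the linear fit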
def Rs_calc_1(Vmp, Jmp, sunsVoc_V, sunsVoc_J):
    # TODO: not finished
    # suns-Voc method
    V_sunsVoc = find_nearest(Jmp, sunsVoc_J, sunsVoc_V)

    return (V_sunsVoc - Vmp) / Jmp


def Rs_calc_2(Voc, Jsc, FF, pFF):
    '''
    TODO: improve
    From:
    Solar Cells: Operating Principles, Technology and System Applications
    taken from ernst2016efficiency
    '''
    return Voc / Jsc * (1 - FF / pFF)


def _Vth(T):
    # this is here so it is the only place a default
    # temperature needs to be defined
    if T is None:
        T = 300
    return constants.k * T / constants.e


def ideal_FF(Voc, T=None):
    '''
    Calculates the ideal fill factor.

    inputs:
        Voc: (float)
            Open circuit voltage in volts
        T: (float, optional)
            Temperature in Kelvin, default of 300K

    output:
        FF_0:
            The ideal fill factor

    Valid for:
        Voc * q / k / T > 10
    Accuracy: 1e-4

    Source: Green, 1982
    http://dx.doi.org/10.1016/0379-6787(82)90057-6
    '''
    voc = Voc / _Vth(T)
    FF_0 = (voc - np.log(voc + 0.72)) / (voc + 1)

    return FF_0


def ideal_FF_2016(Voc, T=None):
    '''
    Calculates the ideal fill factor.

    inputs:
        Voc: (float)
            Open circuit voltage in volts
        T: (float, optional)
            Temperature in Kelvin, default of 300K

    output:
        FF_0:
            The ideal fill factor

    Valid for: ??
    Accuracy: ??

    Source: Green, 2016
    http://dx.doi.org/10.1063/1.4942660
    '''
    voc = Voc / _Vth(T)
    z0 = np.exp(voc + 1)
    # inverse f0
    if0 = 1. - np.exp(-voc)

    FF_0 = (lambertw(z0) - 1)**2 / if0 / voc / lambertw(z0)

    return FF_0.real
def ideal_FF_series(Voc, Jsc, Rs, T=None):
    '''
    Calculates the ideal fill factor accounting for series resistance.

    inputs:
        Voc: (float)
            Open circuit voltage in volts
        Jsc: (float)
            The short circuit current in amps
        Rs: (float)
            The series resistance in Ohms?
        T: (float)
            Temperature in Kelvin

    output:
        FF_s:
            The ideal fill factor accounting for series resistance

    Valid for:
        Voc * q / k / T > 10
        Rs * Jsc / Voc < 0.4
    Accuracy: 4e-3

    Source: Green, 1982
    http://dx.doi.org/10.1016/0379-6787(82)90057-6
    '''
    FF_0 = ideal_FF(Voc, T)
    rs = Rs / Voc * Jsc
    FF_s = FF_0 * (1 - 1.1 * rs) + rs**2 / 5.4

    return FF_s
def ideal_FF_series_2016(Voc, Jsc, Rs, T=None):
    '''
    Calculates the ideal fill factor, accounting for series resistance.

    inputs:
        Voc: (float)
            Open circuit voltage in volts
        Jsc: (float)
            The short circuit current in amps
        Rs: (float)
            The series resistance in Ohms?
        T: (float, optional)
            Temperature in Kelvin, default of 300K

    output:
        FF_s:
            The ideal fill factor accounting for series resistance

    Valid for: ??
    Accuracy: Approximately 4 digit accuracy is maintained in
        technologically interesting cases, where losses are <5% for
        normalised Voc > 10.

    Source: Green, 2016
    http://dx.doi.org/10.1063/1.4942660
    '''
    FF_0 = ideal_FF_2016(Voc, T)

    # normalised values
    voc = Voc / _Vth(T)
    rs = Rs / Voc * Jsc

    # other factors
    if0 = 1. - np.exp(-voc)
    ifs = 1. - np.exp(-voc * (1 - rs))
    z0 = np.exp(voc + 1)

    # calculate it
    FF_s = FF_0 * (1 - voc / lambertw(z0) * rs / if0) * if0 / ifs

    return FF_s.real
def ideal_FF_series_shunt(Voc, Jsc, Rs, Rsh, T=None):
    '''
    Calculates the ideal fill factor, accounting for shunt and series resistance.

    inputs:
        Voc: (float)
            Open circuit voltage in volts
        Jsc: (float)
            The short circuit current in amps
        Rs: (float)
            The series resistance in Ohms?
        Rsh: (float)
            The shunt resistance in Ohms?
        T: (float)
            Temperature in Kelvin

    output:
        FF_s_sh:
            The ideal fill factor accounting for shunt and series resistance

    Valid for:
        Voc * q / k / T > 10
        Rs * Jsc / Voc + Voc / Rsh / Jsc < 0.4
    Accuracy: 3e-2

    Source: Green, 1982
    http://dx.doi.org/10.1016/0379-6787(82)90057-6
    '''
    FF_s = ideal_FF_series(Voc, Jsc, Rs, T)
    voc = Voc / _Vth(T)
    rsh = Rsh / Voc * Jsc
    FF_s_sh = FF_s * (1 - (voc - 0.7) / voc * FF_s / rsh)

    return FF_s_sh
def ideal_FF_shunt_2016(Voc, Jsc, Rsh, T=None):
    '''
    Calculates the ideal fill factor, accounting for shunt resistance.

    inputs:
        Voc: (float)
            Open circuit voltage in volts
        Jsc: (float)
            The short circuit current in amps
        Rsh: (float)
            The shunt resistance in Ohms?
        T: (float)
            Temperature in Kelvin

    output:
        FF_sh:
            The ideal fill factor accounting for shunt resistance

    Valid for: ??
    Accuracy: ??

    Source: Green, 2016
    http://dx.doi.org/10.1063/1.4942660
    '''
    FF_0 = ideal_FF_2016(Voc, T)

    # normalised values
    voc = Voc / _Vth(T)
    rsh = Rsh / Voc * Jsc

    # other factors
    if0 = 1. - np.exp(-voc)
    z0 = np.exp(voc + 1)

    # calculate it
    FF_sh = FF_0 * (1 - lambertw(z0) * if0 / voc /
                    rsh / if0) / (1 - 1 / (voc * rsh))

    return FF_sh.real
def FF_loss_series(Voc, Jsc, Jmp, Rs):
    '''
    Calculates the loss in fill factor from series resistance.

    inputs:
        Voc: (float)
            Open circuit voltage in [V]
        Jsc: (float)
            Short circuit current density in [A cm^{-2}]
        Jmp: (float)
            Maximum power point current density in [A cm^{-2}]
        Rs: (float)
            Series resistance in [Ohm cm^2]

    output:
        FF_Rs: (float)
            The increase in fill factor expected by removing the series resistance
            Dimensionless units

    Source: Khanna, 2013
    http://dx.doi.org/10.1109/JPHOTOV.2013.2270348
    '''
    FF_Rs = Jmp**2 * Rs / (Voc * Jsc)

    return FF_Rs


def FF_loss_shunt(Voc, Jsc, Vmp, Jmp, Rs, Rsh):
    '''
    Calculates the loss in fill factor from shunt resistance.

    inputs:
        Voc: (float)
            Open circuit voltage in [V]
        Jsc: (float)
            Short circuit current density in [A cm^{-2}]
        Vmp: (float)
            Maximum power point voltage in [V]
        Jmp: (float)
            Maximum power point current density in [A cm^{-2}]
        Rs: (float)
            Series resistance in [Ohm cm^2]
        Rsh: (float)
            Shunt resistance in [Ohm cm^2]

    output:
        FF_Rsh: (float)
            The increase in fill factor expected by removing the shunt resistance
            Dimensionless units

    Source: Khanna, 2013
    http://dx.doi.org/10.1109/JPHOTOV.2013.2270348
    '''
    FF_Rsh = (Vmp + Rs * Jmp)**2 / (Voc * Jsc * Rsh)

    return FF_Rsh
def ideality_factor(V, J, Vth):
    '''
    Calculates the ideality factor.

    This assumes that: $e^{V/mV_t} >> 1$.
    This log form is used as it appears to be more robust against noise.
    '''
    with np.errstate(divide='ignore', invalid='ignore'):
        m = 1. / Vth / np.gradient(np.log(J)) * np.gradient(V)
    return m
if __name__ == '__main__':
    print(ideal_FF(0.6, 300))
    print(ideal_FF_2016(0.6, 300))
    print(ideal_FF_series_2016(0.6, 0.04, 1, 300))
    print(ideal_FF_series(0.6, 0.04, 1, 300))
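    # Additional illustrative calls (hypothetical cell values, e.g.
    # Rsh = 1000 Ohm cm^2; these were not part of the original script):
    print(ideal_FF_series_shunt(0.6, 0.04, 1, 1000, 300))
    print(ideal_FF_shunt_2016(0.6, 0.04, 1000, 300))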
| 26.11086 | 82 | 0.578113 | 1,598 | 11,541 | 4.086984 | 0.212766 | 0.024499 | 0.022049 | 0.033073 | 0.48798 | 0.461951 | 0.453989 | 0.432399 | 0.401011 | 0.384015 | 0 | 0.058554 | 0.319296 | 11,541 | 441 | 83 | 26.170068 | 0.772785 | 0.543887 | 0 | 0.155963 | 0 | 0 | 0.036128 | 0.005327 | 0 | 0 | 0 | 0.004535 | 0.009174 | 1 | 0.155963 | false | 0 | 0.055046 | 0 | 0.357798 | 0.036697 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aeea559be1ad437140c0db3ffe9e14fd5e68514 | 4,590 | py | Python | scripts/git-role-management/internal/config.py | msleprosy/cloud-pipeline | bccc2b196fad982380efc37a1c3785098bec6c85 | [
"Apache-2.0"
] | 126 | 2019-03-22T19:40:38.000Z | 2022-02-16T13:01:44.000Z | scripts/git-role-management/internal/config.py | msleprosy/cloud-pipeline | bccc2b196fad982380efc37a1c3785098bec6c85 | [
"Apache-2.0"
] | 1,189 | 2019-03-25T10:39:27.000Z | 2022-03-31T12:50:33.000Z | scripts/git-role-management/internal/config.py | msleprosy/cloud-pipeline | bccc2b196fad982380efc37a1c3785098bec6c85 | [
"Apache-2.0"
] | 62 | 2019-03-22T22:09:49.000Z | 2022-03-08T12:05:56.000Z | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
class ConfigNotFoundError(Exception):
def __init__(self):
super(ConfigNotFoundError, self).__init__('Unable to locate configuration or it is incomplete.')
class Config(object):
"""Provides a wrapper for a syncgit command configuration"""
def __init__(self, safe_initialization=False):
self.api = os.environ.get('API')
self.access_key = os.environ.get('API_TOKEN')
self.proxy = None
self.email_attribute_name = 'Email'
self.name_attribute_name = 'Name'
self.ssh_pub_metadata_name = 'ssh_pub'
self.ssh_prv_metadata_name = 'ssh_prv'
self.admins_group_name = 'ROLE_ADMIN'
self.git_group_prefix = 'PIPELINE-'
self.git_ssh_title = 'Cloud Pipeline'
if self.api and self.access_key:
return
config_file = Config.config_path()
if os.path.exists(config_file):
with open(config_file, 'r') as config_file_stream:
data = json.load(config_file_stream)
if 'api' in data:
self.api = data['api']
if 'access_key' in data:
self.access_key = data['access_key']
if 'proxy' in data:
self.proxy = data['proxy']
if 'email-attribute-name' in data:
self.email_attribute_name = data['email-attribute-name']
if 'name-attribute-name' in data:
self.name_attribute_name = data['name-attribute-name']
if 'ssh-pub-metadata-name' in data:
self.ssh_pub_metadata_name = data['ssh-pub-metadata-name']
if 'ssh-prv-metadata-name' in data:
self.ssh_prv_metadata_name = data['ssh-prv-metadata-name']
if 'admins-group-name' in data:
self.admins_group_name = data['admins-group-name']
if 'git-group-prefix' in data:
self.git_group_prefix = data['git-group-prefix']
elif not safe_initialization:
raise ConfigNotFoundError()
@classmethod
def store(cls, access_key, api, proxy, email_attribute_name, name_attribute_name, ssh_pub_attribute_name,
ssh_prv_attribute_name, admins_group, git_group_prefix):
current_config = Config.safe_instance()
config = {
'api': api if api is not None else current_config.api,
'access_key': access_key if access_key is not None else current_config.access_key,
'proxy': proxy,
'email-attribute-name': email_attribute_name if email_attribute_name is not None else current_config.email_attribute_name,
'name-attribute-name': name_attribute_name if name_attribute_name is not None else current_config.name_attribute_name,
'ssh-pub-metadata-name': ssh_pub_attribute_name if ssh_pub_attribute_name is not None else current_config.ssh_pub_metadata_name,
'ssh-prv-metadata-name': ssh_prv_attribute_name if ssh_prv_attribute_name is not None else current_config.ssh_prv_metadata_name,
'admins-group-name': admins_group if admins_group is not None else current_config.admins_group_name,
'git-group-prefix': git_group_prefix if git_group_prefix is not None else current_config.git_group_prefix
}
config_file = cls.config_path()
with open(config_file, 'w+') as config_file_stream:
json.dump(config, config_file_stream)
@classmethod
def config_path(cls):
home = os.path.expanduser("~")
config_folder = os.path.join(home, '.syncgit')
if not os.path.exists(config_folder):
os.makedirs(config_folder)
config_file = os.path.join(config_folder, 'config.json')
return config_file
@classmethod
def instance(cls):
return cls()
@classmethod
def safe_instance(cls):
return cls(safe_initialization=True)
| 45 | 140 | 0.656863 | 606 | 4,590 | 4.735974 | 0.231023 | 0.108711 | 0.056446 | 0.036237 | 0.255401 | 0.185714 | 0.056446 | 0.056446 | 0.029268 | 0 | 0 | 0.003513 | 0.255773 | 4,590 | 101 | 141 | 45.445545 | 0.836651 | 0.139216 | 0 | 0.053333 | 0 | 0 | 0.136791 | 0.032037 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.026667 | 0.026667 | 0.186667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2af0092cae8d39cf82ee5d84f536a58bf2b7d61b | 1,585 | py | Python | examples/textbook/plot_triple_with_wind.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 131 | 2015-06-04T09:06:57.000Z | 2022-02-01T12:11:29.000Z | examples/textbook/plot_triple_with_wind.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 690 | 2015-10-17T12:18:08.000Z | 2022-03-31T16:15:58.000Z | examples/textbook/plot_triple_with_wind.py | rieder/amuse | 3ac3b6b8f922643657279ddee5c8ab3fc0440d5e | [
"Apache-2.0"
] | 102 | 2015-01-22T10:00:29.000Z | 2022-02-09T13:29:43.000Z | import os
import numpy
from amuse.lab import *
from prepare_figure import single_frame
from distinct_colours import get_distinct
from matplotlib import pyplot
def read_triple_data(filename):
t = []
ain = []
aout = []
ein = []
eout = []
a0in = 0
a0out = 0
for line in open(filename):
if "Triple" in line:
l = line.split()
ti = float(l[3])
if ti <= 0:
a0in = float(l[10])
a0out = float(l[16])
e0in = float(l[12])
e0out = float(l[18])
if ti >= 4:
t.append(float(l[3]))
ain.append(float(l[10])/a0in)
ein.append(float(l[12])/e0in)
aout.append(float(l[16])/a0out)
eout.append(float(l[18])/e0out)
return t, ain, ein, aout, eout
try:
amusedir = os.environ['AMUSE_DIR']
dir = amusedir+'/examples/textbook/'
except:
print('Environment variable AMUSE_DIR not set')
dir = './'
filename = dir+'evolve_triple_with_wind.data'
t, ain, ein, aout, eout = read_triple_data(filename)
x_label = "$a/a_{0}$"
y_label = "$e/e_{0}$"
fig = single_frame(x_label, y_label, logx=False, logy=False,
xsize=10, ysize=8)
color = get_distinct(2)
pyplot.plot(ain, ein, c=color[0], label= 'inner')
pyplot.plot(aout, eout, c=color[1], label= 'outer')
pyplot.legend(loc='best', ncol=1, shadow=False, fontsize=20)
save_file = 'evolve_triple_with_wind.png'
pyplot.savefig(save_file)
print('\nSaved figure in file', save_file,'\n')
pyplot.show()
| 27.327586 | 60 | 0.581073 | 224 | 1,585 | 3.991071 | 0.424107 | 0.067114 | 0.067114 | 0.049217 | 0.033557 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037391 | 0.274448 | 1,585 | 57 | 61 | 27.807018 | 0.74 | 0 | 0 | 0 | 0 | 0 | 0.116719 | 0.0347 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.12 | 0 | 0.16 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2af078174bf68ac08af67a7ee7941f5d3418d0d9 | 1,658 | py | Python | main.py | santiagorr/jira-issues-importer | 7bc2b79da1aa467f5686334cc32f4971a371e431 | [
"Apache-2.0"
] | null | null | null | main.py | santiagorr/jira-issues-importer | 7bc2b79da1aa467f5686334cc32f4971a371e431 | [
"Apache-2.0"
] | null | null | null | main.py | santiagorr/jira-issues-importer | 7bc2b79da1aa467f5686334cc32f4971a371e431 | [
"Apache-2.0"
] | 3 | 2021-11-17T15:11:23.000Z | 2022-02-12T10:48:29.000Z | import getpass
from collections import namedtuple
from lxml import objectify
from project import Project
from importer import Importer
from labelcolourselector import LabelColourSelector
def read_xml_sourcefile(file_names):
files = list()
for file_name in file_names.split(';'):
all_text = open(file_name).read()
files.append(objectify.fromstring(all_text))
return files
file_names = input(
'Path to JIRA XML query file (semi-colon separate for multiple files): ')
all_xml_files = read_xml_sourcefile(file_names)
jira_proj = input('JIRA project name: ')
jira_done_id = input('JIRA Done statusCategory ID [default "3"]: ') or '3'
ac = input('GitHub account name: ')
repo = input('GitHub project name: ')
pat = input('GitHub personal access token: ')
start_from_issue = input('Start from [default "0" (beginning)]: ') or '0'
Options = namedtuple("Options", "accesstoken account repo")
opts = Options(accesstoken=pat, account=ac, repo=repo)
project = Project(jira_proj, jira_done_id)
for f in all_xml_files:
for item in f.channel.item:
project.add_item(item)
project.prettify()
input('Press any key to begin...')
'''
Steps:
1. Create any milestones
2. Create any labels
3. Create each issue with comments, linking them to milestones and labels
4: Post-process all comments to replace issue id placeholders with the real ones
'''
importer = Importer(opts, project)
colourSelector = LabelColourSelector(project)
importer.import_milestones()
if int(start_from_issue) == 0:
importer.import_labels(colourSelector)
importer.import_issues(int(start_from_issue))
# importer.post_process_comments()
| 28.101695 | 82 | 0.747889 | 230 | 1,658 | 5.243478 | 0.4 | 0.046434 | 0.034826 | 0.034826 | 0.043118 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00641 | 0.153197 | 1,658 | 58 | 83 | 28.586207 | 0.852564 | 0.0193 | 0 | 0 | 0 | 0 | 0.214847 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0.028571 | 0.285714 | 0 | 0.342857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2af1f084a9b2f629ef4de06c655a6be14cf64947 | 3,293 | py | Python | drf_problems/exceptions.py | goatwu1993/drf-problems | 6fd59fadbf8cd8e5623964cb0989993cc91d32f2 | [
"MIT"
] | null | null | null | drf_problems/exceptions.py | goatwu1993/drf-problems | 6fd59fadbf8cd8e5623964cb0989993cc91d32f2 | [
"MIT"
] | null | null | null | drf_problems/exceptions.py | goatwu1993/drf-problems | 6fd59fadbf8cd8e5623964cb0989993cc91d32f2 | [
"MIT"
] | null | null | null | import logging
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions
from rest_framework.reverse import reverse
from rest_framework.views import exception_handler as drf_exception_handler
from drf_problems.utils import register
logger = logging.getLogger('drf_problems')
def exception_handler(exc, context):
# Convert Django exceptions (from DRF).
if isinstance(exc, Http404):
exc = exceptions.NotFound()
elif isinstance(exc, PermissionDenied):
exc = exceptions.PermissionDenied()
elif not isinstance(exc, exceptions.APIException):
# Fallback handler to convert remaining exceptions to API exception.
logger.exception(exc)
exc = exceptions.APIException(exc)
request = context['request']
response = drf_exception_handler(exc, context)
data = response.data
problem_title = getattr(exc, 'title', exc.default_detail)
problem_status = response.status_code
problem_code = getattr(exc, 'code', exc.default_code)
problem_type = reverse('drf_problems:error-documentation',
kwargs={'code': problem_code}, request=request)
if isinstance(data, dict):
data['title'] = problem_title
data['status'] = problem_status
data['type'] = problem_type
else:
data = dict(errors=response.data, title=problem_title,
status=problem_status, type=problem_type)
try:
if request.accepted_renderer.format == 'json':
response.content_type = 'application/problem+json'
except AttributeError:
pass
response.data = data
return response
@register
class InvalidVersionRequestedException(exceptions.NotAcceptable):
default_code = 'invalid_version'
default_detail = _('Invalid API version provided.')
format_detail = _('Provided version "{request_version}" is invalid.')
description = _(
'Malformed or unsupported version string is provided with the request.')
def __init__(self, request_version, detail=None, code=None):
if detail is None:
detail = force_text(self.format_detail).format(
request_version=request_version)
super().__init__(detail, code)
@register
class DeprecatedVersionUsedException(exceptions.PermissionDenied):
default_code = 'deprecated_version'
default_detail = _('Deprecated API version provided.')
format_detail = _(
'Minimum version supported is "{min_version}", but the request used "{request_version}"')
description = _(
'API only supports versions above the minimum requirement.')
def __init__(self, request_version, min_version, detail=None, code=None):
"""Exception thrown when deprecated version of API is used.
Positional Arguments:
request_version -- API version provided by Django Request
min_version -- Minimum API version to use with this API
"""
if detail is None:
detail = force_text(self.format_detail).format(
request_version=request_version, min_version=min_version)
super().__init__(detail, code)
| 37.420455 | 97 | 0.704828 | 368 | 3,293 | 6.092391 | 0.293478 | 0.0562 | 0.022748 | 0.023194 | 0.158787 | 0.070473 | 0.070473 | 0.070473 | 0.070473 | 0.070473 | 0 | 0.002314 | 0.212572 | 3,293 | 87 | 98 | 37.850575 | 0.862322 | 0.092013 | 0 | 0.153846 | 0 | 0 | 0.156271 | 0.018983 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046154 | false | 0.015385 | 0.138462 | 0 | 0.353846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2af5b87dab6ad54c5609f4f22a929293047526c1 | 4,060 | py | Python | OA-synthetic-network/code/IZ_moves_to_OA_moves.py | Kao-Group/SCoVMod | db2c4852e5e4e13e4897f1562b3952896dcfb451 | [
"BSD-4-Clause-UC"
] | 2 | 2020-06-30T18:13:51.000Z | 2020-07-22T09:57:09.000Z | OA-synthetic-network/code/IZ_moves_to_OA_moves.py | Kao-Group/SCoVMod | db2c4852e5e4e13e4897f1562b3952896dcfb451 | [
"BSD-4-Clause-UC"
] | null | null | null | OA-synthetic-network/code/IZ_moves_to_OA_moves.py | Kao-Group/SCoVMod | db2c4852e5e4e13e4897f1562b3952896dcfb451 | [
"BSD-4-Clause-UC"
] | null | null | null | import pandas as pd
import numpy.random as rdm
df=pd.read_csv('../Output/OA_info_for_movement_model.csv')
IZ_list=list(set(df['intermediate_zone'].tolist()))
OAs_in_IZ={}
for IZ in IZ_list:
IZ_df=df[(df['intermediate_zone']==IZ)]
OAs_in_IZ[IZ]=list(set(IZ_df['output_area']))
# worker and household densities will be needed for each OA
worker_density={}
household_density={}
northing={}
easting={}
for i,row in df.iterrows():
worker_density[row['output_area']]=row['WF_population']
household_density[row['output_area']]=row['working_age_population']
northing[row['output_area']]=row['northing']
easting[row['output_area']]=row['easting']
#
###### NEEDS EDITING TO READ IN DOWNLOADABLE DATA ######
### WU01SC_IZ2011_Scotland ###########################
#
##df=pd.read_csv('../wu01uk_scotland_intermediate_areas_(ewan_edit).csv')
##print(df.head())
#df=pd.read_excel('../WU01SC_IZ2011_Scotland.xlsx',sheet_name='Persons')
df=pd.read_csv('../output/wu01uk_scotland_IZ.csv')
#IZ_list2=list(set(df['Area of Usual Residence'].tolist()+df['Area of Workplace'].tolist()))
# the final output will be a DataFrame built from this dictionary
dic={'household_OA':[],'workplace_OA':[]}
# loop over every origin destination combo
for index, row in df.iterrows():
if index % 1000==0:
print(index)
# # check that its in the list of IZs for Scotland
# if row['origin'] in IZ_list and row['destination'] in IZ_list:
IZ_workers=row['moves']
#print(row['origin'],row['destination'])
    # for every working individual we assign an OA for their household
    # and an OA for their workplace
    # get the potential workplace OAs for the folks in the destination IZ
workplace_candidates=OAs_in_IZ[row['destination']]
    # need the total density of workers to do this
total_workers=sum([worker_density[OA] for OA in workplace_candidates])
    # get their potential household OAs in the origin IZ
household_candidates=OAs_in_IZ[row['origin']]
# one worker at a time
while IZ_workers:
        # step 1: choose their workplace OA with probability proportional
        # to the OA worker density
r=rdm.random()
slider=0
j=0
while r>slider:
workplace_OA=workplace_candidates[j]
slider=slider+(worker_density[workplace_OA]/total_workers)
j=j+1
# get the location
y_j=northing[workplace_OA]
x_j=easting[workplace_OA]
        # step 2: choose their household OA
        # calculate the selection weight for each candidate household OA
total_weight=0
weight={}
#
        for household_OA in household_candidates:
            #N=household_density[household_OA]
            # get the location of the candidate household OA
            y_i=northing[household_OA]
            x_i=easting[household_OA]
            # calculate the distance to the workplace OA
            distance=(((x_i-x_j)**2)+(y_i-y_j)**2)**(1/2)
            # NOTE: the distance-decay weight below is disabled, so households
            # are currently weighted by population density alone
            #weight[household_OA]=N*((1+distance)**(-3))
            total_weight=total_weight+household_density[household_OA]
# then choose proportionally to the weight
r=rdm.random()
slider=0
i=0
while r>slider:
household_OA=household_candidates[i]
slider=slider+(household_density[household_OA]/total_weight)
i=i+1
# add results to the dictionary
dic['household_OA'].append(household_OA)
dic['workplace_OA'].append(workplace_OA)
        # remove the person from the household OA pool
        # (the matching workplace decrement is currently disabled)
        household_density[household_OA]=household_density[household_OA]-1
        #worker_density[workplace_OA]=worker_density[workplace_OA]-1
        # remove one worker as they now have a workplace and household
IZ_workers=IZ_workers-1
#save results
df=pd.DataFrame(dic)
df.to_csv('../output/OA_synthetic_movements.csv',index=False)
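# Note: the roulette-wheel selection loops above can be written more
# idiomatically with numpy (an equivalent sketch, not a behaviour change):
#
#   probs = [worker_density[OA] / total_workers for OA in workplace_candidates]
#   workplace_OA = rdm.choice(workplace_candidates, p=probs)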
| 33.553719 | 92 | 0.652956 | 552 | 4,060 | 4.608696 | 0.268116 | 0.056211 | 0.049135 | 0.053066 | 0.082547 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012816 | 0.231281 | 4,060 | 121 | 93 | 33.553719 | 0.802307 | 0.382512 | 0 | 0.107143 | 0 | 0 | 0.130668 | 0.053586 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.035714 | 0 | 0.035714 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2af79923bd40625f6415a058b45319c413a02cbf | 909 | py | Python | userbot/plugins/angry.py | Declan57/SPARKZZZ | 4fb9ae8581b96a7040700ff1dc7244cd19f08e40 | [
"MIT"
] | 1 | 2020-10-04T10:08:43.000Z | 2020-10-04T10:08:43.000Z | userbot/plugins/angry.py | ranijithhub/SPARKZZZ | 4fcbd15d7466b71261f8b437b0654d9e5cde1b55 | [
"MIT"
] | null | null | null | userbot/plugins/angry.py | ranijithhub/SPARKZZZ | 4fcbd15d7466b71261f8b437b0654d9e5cde1b55 | [
"MIT"
] | null | null | null | """Emoji
Available Commands:
.angry"""
from telethon import events
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd("angry"))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 18)
#await event.edit(input_str)
await event.edit("I am getting angry now")
animation_chars = [
"😡😡😡",
"I am angry with you",
"Just shut up",
"And RUN Away NOW",
"Or else",
"I would call CEO of Telegram",
"My friend is also a hacker...",
"I would call him if you don't shut up",
"🤬🤬Warning you, Don't repeat it again and shut up now...🤬🤬",
"🤬🤬🤬🤬🤬 BSDK ab toh chup ho ja."
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])
| 25.25 | 71 | 0.569857 | 126 | 909 | 4.119048 | 0.611111 | 0.057803 | 0.080925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009709 | 0.320132 | 909 | 35 | 72 | 25.971429 | 0.81068 | 0.066007 | 0 | 0 | 0 | 0 | 0.313167 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2af9bff82ea4602a6d0ab8c96843bad10b4fc313 | 4,587 | py | Python | AsyncGear/run_when.py | monk-after-90s/AsyncGear | 6773d38d564c21bbb2f9a0d4fd14a0c24b541ece | [
"MIT"
] | 4 | 2021-01-06T06:14:04.000Z | 2022-01-12T05:32:03.000Z | AsyncGear/run_when.py | monk-after-90s/AsyncGear | 6773d38d564c21bbb2f9a0d4fd14a0c24b541ece | [
"MIT"
] | 1 | 2021-08-05T09:54:30.000Z | 2021-08-05T10:43:33.000Z | AsyncGear/run_when.py | monk-after-90s/AsyncGear | 6773d38d564c21bbb2f9a0d4fd14a0c24b541ece | [
"MIT"
] | null | null | null | import asyncio
def _run_when(obj, time_method: str, period_name: str, queue_blocking='abandon'):
'''
Decorator, run the decorated when obj is at the exact moment or period.
:param time_method: enter, exit, inside, outside
:param obj:
:param period_name:
    :param queue_blocking: When the decorated is activated too frequently, 'non_block' means run immediately anyway;
                           'queue' means wait for the previous run to complete and then run the newly activated one;
                           'abandon' means drop the new activation if the previous run has not completed yet.
:return:
'''
    def decorator(decorated):
async def runner():
if asyncio.iscoroutinefunction(decorated):
if queue_blocking == 'abandon':
pending_task: asyncio.Task = None
elif queue_blocking == 'queue':
q = asyncio.Queue()
# Get q item to run decorated
async def queue2run_coroutine():
while True:
await asyncio.create_task(q.get())
await asyncio.create_task(decorated())
q.task_done()
asyncio.create_task(queue2run_coroutine())
while True:
# wait the exact time
from .Gear import Gear
await asyncio.create_task(getattr(Gear(obj),
{'enter': 'wait_enter_period',
'exit': 'wait_exit_period',
'inside': 'wait_inside_period',
'outside': 'wait_outside_period'}[time_method])(period_name))
if not asyncio.iscoroutinefunction(decorated):
decorated()
else:
if queue_blocking == 'non_block':
asyncio.create_task(decorated())
elif queue_blocking == 'abandon':
if not bool(pending_task) or pending_task.done(): # previous completes
pending_task = asyncio.create_task(decorated())
elif bool(pending_task):
await pending_task
elif queue_blocking == 'queue':
q.put_nowait(None)
runner_task = asyncio.create_task(runner())
from .Gear import Gear
Gear(obj).assistant_tasks.append(runner_task)
return decorated
    return decorator
def run_when_enter(obj, period_name: str, queue_blocking='abandon'):
'''
Decorator, run the decorated when obj enters the period.
:param obj:
:param period_name:
    :param queue_blocking: When the decorated is activated too frequently, 'non_block' means run immediately anyway;
                           'queue' means wait for the previous run to complete and then run the newly activated one;
                           'abandon' means drop the new activation if the previous run has not completed yet.
:return:
'''
return _run_when(obj, 'enter', period_name, queue_blocking)
def run_when_exit(obj, period_name: str, queue_blocking='abandon'):
'''
Decorator, run the decorated when obj exits the period.
:param obj:
:param period_name:
    :param queue_blocking: When the decorated is activated too frequently, 'non_block' means run immediately anyway;
                           'queue' means wait for the previous run to complete and then run the newly activated one;
                           'abandon' means drop the new activation if the previous run has not completed yet.
:return:
'''
return _run_when(obj, 'exit', period_name, queue_blocking)
def run_when_inside(obj, period_name: str):
'''
    Decorator, run the decorated when obj is inside the period. The queue blocking style is 'abandon', which means the new
    activation is dropped if the previous run has not completed yet.
:param obj:
:param period_name:
:return:
'''
return _run_when(obj, 'inside', period_name, 'abandon')
def run_when_outside(obj, period_name: str):
'''
    Decorator, run the decorated when obj is outside the period. The queue blocking style is 'abandon', which means the new
    activation is dropped if the previous run has not completed yet.
:param obj:
:param period_name:
:return:
'''
return _run_when(obj, 'outside', period_name, 'abandon')
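# Illustrative usage sketch. ``kettle`` is an arbitrary object and 'boiling'
# an arbitrary period name (both assumptions); registering and switching
# periods is handled by the Gear class and is not shown here:
#
#   @run_when_enter(kettle, 'boiling', queue_blocking='queue')
#   async def whistle():
#       print('entered boiling')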
| 40.59292 | 131 | 0.580772 | 508 | 4,587 | 5.084646 | 0.161417 | 0.058072 | 0.04336 | 0.046458 | 0.59969 | 0.558653 | 0.558653 | 0.532327 | 0.532327 | 0.532327 | 0 | 0.000667 | 0.346414 | 4,587 | 112 | 132 | 40.955357 | 0.860907 | 0.400698 | 0 | 0.12766 | 0 | 0 | 0.071011 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0 | 0.06383 | 0 | 0.319149 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2afa0c228ca10cf682be6e28673c9cc970ae83e7 | 1,804 | py | Python | tests/prediction_utils_test.py | ltfschoen/ML-Predictions | 255793629d86b05a841041f894a318da1fa3ce63 | [
"MIT"
] | 5 | 2017-05-10T09:03:56.000Z | 2018-01-10T05:41:36.000Z | tests/prediction_utils_test.py | ltfschoen/ML-Predictions | 255793629d86b05a841041f894a318da1fa3ce63 | [
"MIT"
] | 14 | 2017-05-12T21:14:04.000Z | 2017-05-18T08:32:18.000Z | tests/prediction_utils_test.py | ltfschoen/ML-Predictions | 255793629d86b05a841041f894a318da1fa3ce63 | [
"MIT"
] | 3 | 2017-10-13T01:30:06.000Z | 2021-08-14T18:17:58.000Z | import unittest
import sys
import site
def get_main_path():
test_path = sys.path[0] # sys.path[0] is current path in lib subdirectory
split_on_char = "/"
return split_on_char.join(test_path.split(split_on_char)[:-1])
main_path = get_main_path()
site.addsitedir(main_path+'/tests')
site.addsitedir(main_path+'/lib')
print ("Imported subfolder: %s" % (main_path+'/tests') )
from tests.input_event_test import EVENT
from prediction_config import PredictionConfig
from prediction_utils import PredictionUtils
from prediction_data import PredictionData
class PredictionDataTestCase(unittest.TestCase):
"""Tests for `prediction_utils.py`."""
def setUp(self):
self.prediction_config = PredictionConfig(EVENT, None)
self.prediction_utils = PredictionUtils(self.prediction_config)
self.prediction_data = PredictionData(self.prediction_config, self.prediction_utils)
self.dataset_choice = self.prediction_config.DATASET_CHOICE
def tearDown(self):
del self.prediction_config
del self.prediction_utils
del self.prediction_data
del self.dataset_choice
def test_calc_sensitivity(self):
# Setup
        _count_true_positives = 4
        _count_false_negatives = 6
        # sensitivity = TP / (TP + FN) = 4 / (4 + 6) = 0.4
        _expected_sensitivity = 0.4
# Test
self.assertEqual(self.prediction_utils.calc_sensitivity(_count_true_positives, _count_false_negatives), _expected_sensitivity)
    def test_calc_specificity(self):
        # Setup
        _count_true_negatives = 4
        _count_false_positives = 6
        # specificity = TN / (TN + FP) = 4 / (4 + 6) = 0.4; calc_sensitivity
        # computes the same ratio, so it is reused here with the negative
        # counts passed in place of the positive ones
        _expected_specificity = 0.4
        # Test
        self.assertEqual(self.prediction_utils.calc_sensitivity(_count_true_negatives, _count_false_positives), _expected_specificity)
if __name__ == '__main__':
unittest.main()
| 31.103448 | 134 | 0.734479 | 218 | 1,804 | 5.697248 | 0.288991 | 0.135266 | 0.080515 | 0.035427 | 0.190016 | 0.135266 | 0.135266 | 0.135266 | 0.135266 | 0.135266 | 0 | 0.007483 | 0.185144 | 1,804 | 57 | 135 | 31.649123 | 0.837415 | 0.057095 | 0 | 0.052632 | 0 | 0 | 0.027794 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.131579 | false | 0 | 0.210526 | 0 | 0.394737 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2afa166b563dd6045cc224c65c0dad79280243f7 | 986 | py | Python | Chapter11/datasets.py | dvschultz/Hands-On-Generative-Adversarial-Networks-with-PyTorch-1.x | f76231fc8848940f469c647e077edc5d68de4aa1 | [
"MIT"
] | 50 | 2019-12-24T08:02:13.000Z | 2022-03-17T10:10:02.000Z | Chapter11/datasets.py | dvschultz/Hands-On-Generative-Adversarial-Networks-with-PyTorch-1.x | f76231fc8848940f469c647e077edc5d68de4aa1 | [
"MIT"
] | 3 | 2020-05-15T09:06:39.000Z | 2020-09-10T14:55:49.000Z | Chapter11/datasets.py | YETsong/gan | d206e50bbbf6ccfad9151220e5f044bc74e849a7 | [
"MIT"
] | 40 | 2019-12-15T02:46:26.000Z | 2022-02-25T14:46:25.000Z | # datasets.py
# B11764 Chapter 11
# ==============================================
import os
import numpy as np
import scipy.ndimage as nd
import scipy.io as io
import torch
from torch.utils.data import Dataset
def getVoxelFromMat(path, cube_len=64):
voxels = io.loadmat(path)['instance']
voxels = np.pad(voxels, (1, 1), 'constant', constant_values=(0, 0))
    if cube_len == 64:  # the padded grid is 32^3; upsample to 64^3 on request
voxels = nd.zoom(voxels, (2, 2, 2), mode='constant', order=0)
return voxels
class ShapeNetDataset(Dataset):
def __init__(self, root, cube_len):
self.root = root
self.listdir = os.listdir(self.root)
self.cube_len = cube_len
def __getitem__(self, index):
with open(os.path.join(self.root, self.listdir[index]), "rb") as f:
volume = np.asarray(getVoxelFromMat(
f, self.cube_len), dtype=np.float32)
return torch.FloatTensor(volume)
def __len__(self):
return len(self.listdir)
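# Illustrative usage sketch (the dataset root path is an assumption):
#
#   from torch.utils.data import DataLoader
#   dataset = ShapeNetDataset(root='/path/to/shapenet_mats/', cube_len=64)
#   loader = DataLoader(dataset, batch_size=8, shuffle=True)
#   for volumes in loader:  # volumes: FloatTensor of shape (8, 64, 64, 64)
#       ...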
| 28.171429 | 75 | 0.619675 | 134 | 986 | 4.410448 | 0.440299 | 0.08291 | 0.030457 | 0.050761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029831 | 0.218053 | 986 | 34 | 76 | 29 | 0.736706 | 0.077079 | 0 | 0 | 0 | 0 | 0.028698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.25 | 0.041667 | 0.583333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2afadd83de40223af511470c5208c6e4f3fb42e5 | 741 | py | Python | examples/quadratic_function.py | mchalecki/cmaes | 8897a4b3d40f0120c14ddc78c8ded0852e627899 | [
"MIT"
] | 134 | 2020-01-31T01:17:33.000Z | 2021-08-14T18:36:00.000Z | examples/quadratic_function.py | mchalecki/cmaes | 8897a4b3d40f0120c14ddc78c8ded0852e627899 | [
"MIT"
] | 74 | 2020-01-30T20:18:09.000Z | 2021-04-10T16:53:31.000Z | examples/quadratic_function.py | mchalecki/cmaes | 8897a4b3d40f0120c14ddc78c8ded0852e627899 | [
"MIT"
] | 32 | 2020-01-30T20:32:51.000Z | 2021-07-21T14:09:06.000Z | import numpy as np
from cmaes import CMA
def quadratic(x1, x2):
return (x1 - 3) ** 2 + (10 * (x2 + 2)) ** 2
def main():
optimizer = CMA(mean=np.zeros(2), sigma=1.3)
print(" g f(x1,x2) x1 x2 ")
print("=== ========== ====== ======")
while True:
solutions = []
for _ in range(optimizer.population_size):
x = optimizer.ask()
value = quadratic(x[0], x[1])
solutions.append((x, value))
print(
f"{optimizer.generation:3d} {value:10.5f}"
f" {x[0]:6.2f} {x[1]:6.2f}"
)
optimizer.tell(solutions)
if optimizer.should_stop():
break
if __name__ == "__main__":
main()
| 23.15625 | 59 | 0.466937 | 90 | 741 | 3.722222 | 0.533333 | 0.035821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060543 | 0.353576 | 741 | 31 | 60 | 23.903226 | 0.638831 | 0 | 0 | 0 | 0 | 0 | 0.183536 | 0.033738 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0.043478 | 0.217391 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2afe63b0016394538cfe347c8a46893f417739c6 | 12,581 | py | Python | src/build/toolchain/apple/linker_driver.py | Chilledheart/naiveproxy | 9d28da89b325a90d33add830f4202c8b17c7c3e3 | [
"BSD-3-Clause"
] | null | null | null | src/build/toolchain/apple/linker_driver.py | Chilledheart/naiveproxy | 9d28da89b325a90d33add830f4202c8b17c7c3e3 | [
"BSD-3-Clause"
] | null | null | null | src/build/toolchain/apple/linker_driver.py | Chilledheart/naiveproxy | 9d28da89b325a90d33add830f4202c8b17c7c3e3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import os.path
import re
import shutil
import subprocess
import sys
# Prefix for all custom linker driver arguments.
LINKER_DRIVER_ARG_PREFIX = '-Wcrl,'
# The linker_driver.py is responsible for forwarding a linker invocation to
# the compiler driver, while processing special arguments itself.
#
# Usage: linker_driver.py clang++ main.o -L. -llib -o prog -Wcrl,dsym,out
#
# On Mac, the logical step of linking is handled by three discrete tools to
# perform the image link, debug info link, and strip. The linker_driver.py
# combines these three steps into a single tool.
#
# The command passed to the linker_driver.py should be the compiler driver
# invocation for the linker. It is first invoked unaltered (except for the
# removal of the special driver arguments, described below). Then the driver
# performs additional actions, based on these arguments:
#
# -Wcrl,dsym,<dsym_path_prefix>
# After invoking the linker, this will run `dsymutil` on the linker's
# output, producing a dSYM bundle, stored at dsym_path_prefix. As an
# example, if the linker driver were invoked with:
# "... -o out/gn/obj/foo/libbar.dylib ... -Wcrl,dsym,out/gn ..."
# The resulting dSYM would be out/gn/libbar.dylib.dSYM/.
#
# -Wcrl,dsymutilpath,<dsymutil_path>
# Sets the path to the dsymutil to run with -Wcrl,dsym, in which case
# `xcrun` is not used to invoke it.
#
# -Wcrl,unstripped,<unstripped_path_prefix>
# After invoking the linker, and before strip, this will save a copy of
# the unstripped linker output in the directory unstripped_path_prefix.
#
# -Wcrl,strip,<strip_arguments>
# After invoking the linker, and optionally dsymutil, this will run
# the strip command on the linker's output. strip_arguments are
# comma-separated arguments to be passed to the strip command.
#
# -Wcrl,strippath,<strip_path>
# Sets the path to the strip to run with -Wcrl,strip, in which case
# `xcrun` is not used to invoke it.
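#
# For example, a single invocation that links, produces a dSYM, keeps an
# unstripped copy, and strips the output might look like (illustrative):
#
#   linker_driver.py clang++ foo.o -o out/libfoo.dylib \
#       -Wcrl,dsym,out -Wcrl,unstripped,out/lib.unstripped -Wcrl,strip,-x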
class LinkerDriver(object):
def __init__(self, args):
"""Creates a new linker driver.
Args:
args: list of string, Arguments to the script.
"""
if len(args) < 2:
raise RuntimeError("Usage: linker_driver.py [linker-invocation]")
self._args = args
# List of linker driver actions. **The sort order of this list affects
# the order in which the actions are invoked.**
# The first item in the tuple is the argument's -Wcrl,<sub_argument>
# and the second is the function to invoke.
self._actions = [
('dsymutilpath,', self.set_dsymutil_path),
('dsym,', self.run_dsymutil),
('unstripped,', self.run_save_unstripped),
('strippath,', self.set_strip_path),
('strip,', self.run_strip),
]
# Linker driver actions can modify the these values.
self._dsymutil_cmd = ['xcrun', 'dsymutil']
self._strip_cmd = ['xcrun', 'strip']
# The linker output file, lazily computed in self._get_linker_output().
self._linker_output = None
def run(self):
"""Runs the linker driver, separating out the main compiler driver's
arguments from the ones handled by this class. It then invokes the
required tools, starting with the compiler driver to produce the linker
output.
"""
# Collect arguments to the linker driver (this script) and remove them
# from the arguments being passed to the compiler driver.
linker_driver_actions = {}
compiler_driver_args = []
for index, arg in enumerate(self._args[1:]):
if arg.startswith(LINKER_DRIVER_ARG_PREFIX):
# Convert driver actions into a map of name => lambda to invoke.
driver_action = self._process_driver_arg(arg)
assert driver_action[0] not in linker_driver_actions
linker_driver_actions[driver_action[0]] = driver_action[1]
else:
compiler_driver_args.append(arg)
if self._get_linker_output() is None:
raise ValueError(
'Could not find path to linker output (-o or --output)')
linker_driver_outputs = [self._get_linker_output()]
try:
# Zero the mtime in OSO fields for deterministic builds.
# https://crbug.com/330262.
env = os.environ.copy()
env['ZERO_AR_DATE'] = '1'
# Run the linker by invoking the compiler driver.
subprocess.check_call(compiler_driver_args, env=env)
# Run the linker driver actions, in the order specified by the
# actions list.
for action in self._actions:
name = action[0]
if name in linker_driver_actions:
linker_driver_outputs += linker_driver_actions[name]()
except:
# If a linker driver action failed, remove all the outputs to make
# the build step atomic.
map(_remove_path, linker_driver_outputs)
# Re-report the original failure.
raise
def _get_linker_output(self):
"""Returns the value of the output argument to the linker."""
if not self._linker_output:
for index, arg in enumerate(self._args):
if arg in ('-o', '-output', '--output'):
self._linker_output = self._args[index + 1]
break
return self._linker_output
def _process_driver_arg(self, arg):
"""Processes a linker driver argument and returns a tuple containing the
name and unary lambda to invoke for that linker driver action.
Args:
arg: string, The linker driver argument.
Returns:
A 2-tuple:
0: The driver action name, as in |self._actions|.
1: A lambda that calls the linker driver action with its direct
argument and returns a list of outputs from the action.
"""
if not arg.startswith(LINKER_DRIVER_ARG_PREFIX):
raise ValueError('%s is not a linker driver argument' % (arg, ))
sub_arg = arg[len(LINKER_DRIVER_ARG_PREFIX):]
for driver_action in self._actions:
(name, action) = driver_action
if sub_arg.startswith(name):
return (name, lambda: action(sub_arg[len(name):]))
raise ValueError('Unknown linker driver argument: %s' % (arg, ))
def run_dsymutil(self, dsym_path_prefix):
"""Linker driver action for -Wcrl,dsym,<dsym-path-prefix>. Invokes
dsymutil on the linker's output and produces a dsym file at |dsym_file|
path.
Args:
dsym_path_prefix: string, The path at which the dsymutil output
should be located.
Returns:
list of string, Build step outputs.
"""
if not len(dsym_path_prefix):
raise ValueError('Unspecified dSYM output file')
linker_output = self._get_linker_output()
base = os.path.basename(linker_output)
dsym_out = os.path.join(dsym_path_prefix, base + '.dSYM')
# Remove old dSYMs before invoking dsymutil.
_remove_path(dsym_out)
tools_paths = _find_tools_paths(self._args)
if os.environ.get('PATH'):
tools_paths.append(os.environ['PATH'])
dsymutil_env = os.environ.copy()
dsymutil_env['PATH'] = ':'.join(tools_paths)
# Run dsymutil and redirect stdout and stderr to the same pipe.
process = subprocess.Popen(self._dsymutil_cmd +
['-o', dsym_out, linker_output],
env=dsymutil_env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout = process.communicate()[0].decode('utf-8')
# Filter the output to remove excessive log spam generated by a
# combination of ldd, icf and dsymutil.
# TODO(crbug.com/1279639): Fix in dsymutil itself.
stdout = _filter_dsym_output(stdout)
if stdout:
sys.stderr.write(stdout)
return [dsym_out]
def set_dsymutil_path(self, dsymutil_path):
"""Linker driver action for -Wcrl,dsymutilpath,<dsymutil_path>.
Sets the invocation command for dsymutil, which allows the caller to
specify an alternate dsymutil. This action is always processed before
the RunDsymUtil action.
Args:
dsymutil_path: string, The path to the dsymutil binary to run
Returns:
No output - this step is run purely for its side-effect.
"""
self._dsymutil_cmd = [dsymutil_path]
return []
def run_save_unstripped(self, unstripped_path_prefix):
"""Linker driver action for -Wcrl,unstripped,<unstripped_path_prefix>.
Copies the linker output to |unstripped_path_prefix| before stripping.
Args:
unstripped_path_prefix: string, The path at which the unstripped
output should be located.
Returns:
list of string, Build step outputs.
"""
if not len(unstripped_path_prefix):
raise ValueError('Unspecified unstripped output file')
base = os.path.basename(self._get_linker_output())
unstripped_out = os.path.join(unstripped_path_prefix,
base + '.unstripped')
shutil.copyfile(self._get_linker_output(), unstripped_out)
return [unstripped_out]
def run_strip(self, strip_args_string):
"""Linker driver action for -Wcrl,strip,<strip_arguments>.
Args:
strip_args_string: string, Comma-separated arguments for `strip`.
Returns:
list of string, Build step outputs.
"""
strip_command = list(self._strip_cmd)
if len(strip_args_string) > 0:
strip_command += strip_args_string.split(',')
strip_command.append(self._get_linker_output())
subprocess.check_call(strip_command)
return []
def set_strip_path(self, strip_path):
"""Linker driver action for -Wcrl,strippath,<strip_path>.
Sets the invocation command for strip, which allows the caller to
specify an alternate strip. This action is always processed before the
RunStrip action.
Args:
strip_path: string, The path to the strip binary to run
Returns:
No output - this step is run purely for its side-effect.
"""
self._strip_cmd = [strip_path]
return []
# Regular expressions matching log spam messages from dsymutil.
DSYM_SPURIOUS_PATTERNS = [
re.compile(v) for v in [
r'failed to insert symbol',
r'could not find object file symbol for symbol',
]
]
def _matches_dsym_spurious_patterns(line):
"""Returns True if |line| matches one of DSYM_SPURIOUS_PATTERNS."""
for pattern in DSYM_SPURIOUS_PATTERNS:
if pattern.search(line) is not None:
return True
return False
def _filter_dsym_output(dsymutil_output):
"""Filers dsymutil output to remove excessive log spam.
Args:
dsymutil_output: string containing the output generated by dsymutil
(contains both stdout and stderr)
Returns:
The filtered output of dsymutil.
"""
filtered_output = []
for line in dsymutil_output.splitlines():
if _matches_dsym_spurious_patterns(line):
continue
filtered_output.append(line + '\n')
return ''.join(filtered_output)
def _find_tools_paths(full_args):
"""Finds all paths where the script should look for additional tools."""
paths = []
for idx, arg in enumerate(full_args):
if arg in ['-B', '--prefix']:
paths.append(full_args[idx + 1])
elif arg.startswith('-B'):
paths.append(arg[2:])
elif arg.startswith('--prefix='):
paths.append(arg[9:])
return paths
def _remove_path(path):
"""Removes the file or directory at |path| if it exists."""
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.unlink(path)
if __name__ == '__main__':
LinkerDriver(sys.argv).run()
sys.exit(0)
| 36.6793 | 80 | 0.634608 | 1,609 | 12,581 | 4.806091 | 0.205718 | 0.057416 | 0.017458 | 0.017199 | 0.224751 | 0.184017 | 0.091944 | 0.061296 | 0.042416 | 0.042416 | 0 | 0.003989 | 0.282728 | 12,581 | 342 | 81 | 36.78655 | 0.852948 | 0.458946 | 0 | 0.034965 | 0 | 0 | 0.074519 | 0 | 0 | 0 | 0 | 0.002924 | 0.006993 | 1 | 0.090909 | false | 0 | 0.041958 | 0 | 0.216783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aff423941c8ce0bc1885b92a4fcb673fb26ff3d | 451 | py | Python | cogs/affirmationcog.py | IshaanIvaturi/rucs24-bot | 27735d9247ca97a66c70164cad7d74df367dc955 | [
"MIT"
] | 6 | 2020-08-29T15:41:35.000Z | 2020-10-28T20:12:02.000Z | cogs/affirmationcog.py | IshaanIvaturi/rucs24-bot | 27735d9247ca97a66c70164cad7d74df367dc955 | [
"MIT"
] | 73 | 2020-08-26T19:32:40.000Z | 2020-11-23T05:16:51.000Z | cogs/affirmationcog.py | IshaanIvaturi/rucs24-bot | 27735d9247ca97a66c70164cad7d74df367dc955 | [
"MIT"
] | 17 | 2020-08-26T21:15:41.000Z | 2020-11-10T02:02:07.000Z | import requests
import discord
from discord.ext import commands
class AffirmationCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def affirmation(self, ctx):
"""Tells you a positive affirmation"""
r = requests.get("https://www.affirmations.dev/")
apidata = r.json()
await ctx.send(apidata["affirmation"])
def setup(bot):
bot.add_cog(AffirmationCog(bot))
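# Illustrative sketch: from the bot's entry point this cog would typically be
# registered with (the module path is an assumption):
#
#   bot.load_extension('cogs.affirmationcog')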
| 22.55 | 57 | 0.658537 | 55 | 451 | 5.309091 | 0.6 | 0.047945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.217295 | 451 | 19 | 58 | 23.736842 | 0.827195 | 0 | 0 | 0 | 0 | 0 | 0.096852 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.230769 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2aff9487fecaba2b4bb47bc8bf831dcbe8258beb | 5,549 | py | Python | aas_core_codegen/rdf_shacl/common.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | null | null | null | aas_core_codegen/rdf_shacl/common.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | null | null | null | aas_core_codegen/rdf_shacl/common.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | null | null | null | """Provide common functions for both RDF and SHACL generators."""
from typing import MutableMapping, Tuple, Optional, List, Union
from icontract import ensure
from aas_core_codegen import intermediate, specific_implementations
from aas_core_codegen.rdf_shacl import naming as rdf_shacl_naming
from aas_core_codegen.common import (
Stripped,
Error,
assert_union_without_excluded,
assert_never,
)
INDENT = " "
INDENT2 = INDENT * 2
INDENT3 = INDENT * 3
INDENT4 = INDENT * 4
def string_literal(text: str) -> Stripped:
"""Generate a valid and escaped string literal based on the free-form ``text``."""
if len(text) == 0:
return Stripped('""')
escaped = text.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
return Stripped(f'"{escaped}"')
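# For example (illustrative):
#   string_literal('line1\nline2') returns Stripped('"line1\\nline2"')
#   string_literal('say "hi"') returns Stripped('"say \\"hi\\""')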
ClassToRdfsRange = MutableMapping[intermediate.ClassUnion, Stripped]
@ensure(lambda result: (result[0] is not None) ^ (result[1] is not None))
def map_class_to_rdfs_range(
symbol_table: intermediate.SymbolTable,
spec_impls: specific_implementations.SpecificImplementations,
) -> Tuple[Optional[ClassToRdfsRange], Optional[Error]]:
"""
Iterate over all the symbols and determine their value as ``rdfs:range``.
This also applies for ``sh:datatype`` in SHACL.
"""
class_to_rdfs_range = dict() # type: ClassToRdfsRange
errors = [] # type: List[Error]
for symbol in symbol_table.symbols:
if isinstance(symbol, (intermediate.AbstractClass, intermediate.ConcreteClass)):
if symbol.is_implementation_specific:
implementation_key = specific_implementations.ImplementationKey(
f"rdf/{symbol.name}/as_rdfs_range.ttl"
)
implementation = spec_impls.get(implementation_key, None)
if implementation is None:
errors.append(
Error(
symbol.parsed.node,
f"The implementation snippet for "
f"how to represent the class {symbol.parsed.name} "
f"as ``rdfs:range`` is missing: {implementation_key}",
)
)
else:
class_to_rdfs_range[symbol] = implementation
else:
class_to_rdfs_range[symbol] = Stripped(
f"aas:{rdf_shacl_naming.class_name(symbol.name)}"
)
if len(errors) > 0:
return None, Error(
None,
"Failed to determine the mapping symbol 🠒 ``rdfs:range`` "
"for one or more symbols",
errors,
)
return class_to_rdfs_range, None
def rdfs_range_for_type_annotation(
type_annotation: intermediate.TypeAnnotationUnion,
class_to_rdfs_range: ClassToRdfsRange,
) -> Stripped:
"""Determine the ``rdfs:range`` corresponding to the ``type_annotation``."""
rdfs_range = None # type: Optional[str]
if isinstance(type_annotation, intermediate.PrimitiveTypeAnnotation):
rdfs_range = PRIMITIVE_MAP[type_annotation.a_type]
elif isinstance(type_annotation, intermediate.OurTypeAnnotation):
if isinstance(type_annotation.symbol, intermediate.Enumeration):
cls_name = rdf_shacl_naming.class_name(type_annotation.symbol.name)
rdfs_range = f"aas:{cls_name}"
elif isinstance(
type_annotation.symbol,
(intermediate.AbstractClass, intermediate.ConcreteClass),
):
rdfs_range = class_to_rdfs_range[type_annotation.symbol]
elif isinstance(type_annotation.symbol, intermediate.ConstrainedPrimitive):
rdfs_range = PRIMITIVE_MAP[type_annotation.symbol.constrainee]
else:
assert_never(type_annotation.symbol)
elif isinstance(type_annotation, intermediate.ListTypeAnnotation):
rdfs_range = rdfs_range_for_type_annotation(
type_annotation=type_annotation.items,
class_to_rdfs_range=class_to_rdfs_range,
)
elif isinstance(type_annotation, intermediate.OptionalTypeAnnotation):
rdfs_range = rdfs_range_for_type_annotation(
type_annotation=type_annotation.value,
class_to_rdfs_range=class_to_rdfs_range,
)
else:
assert_never(type_annotation)
assert rdfs_range is not None
return Stripped(rdfs_range)
PRIMITIVE_MAP = {
intermediate.PrimitiveType.BOOL: "xsd:boolean",
intermediate.PrimitiveType.INT: "xsd:integer",
intermediate.PrimitiveType.FLOAT: "xsd:double",
intermediate.PrimitiveType.STR: "xsd:string",
intermediate.PrimitiveType.BYTEARRAY: "xsd:byte",
}
assert all(literal in PRIMITIVE_MAP for literal in intermediate.PrimitiveType)
TypeAnnotationExceptOptional = Union[
intermediate.PrimitiveTypeAnnotation,
intermediate.OurTypeAnnotation,
intermediate.ListTypeAnnotation,
]
assert_union_without_excluded(
original_union=intermediate.TypeAnnotationUnion,
subset_union=TypeAnnotationExceptOptional,
excluded=[intermediate.OptionalTypeAnnotation],
)
def beneath_optional(
type_annotation: intermediate.TypeAnnotationUnion,
) -> TypeAnnotationExceptOptional:
"""Descend below ``Optional[...]`` to the underlying type."""
type_anno = type_annotation
while isinstance(type_anno, intermediate.OptionalTypeAnnotation):
type_anno = type_anno.value
assert not isinstance(type_anno, intermediate.OptionalTypeAnnotation)
return type_anno
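# For example (illustrative): for an annotation of the form
# ``Optional[List[T]]``, :func:`beneath_optional` returns the ``List[T]`` part.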
| 35.8 | 88 | 0.676699 | 573 | 5,549 | 6.326353 | 0.265271 | 0.069517 | 0.033379 | 0.048552 | 0.270621 | 0.148138 | 0.089931 | 0.052414 | 0.034759 | 0.034759 | 0 | 0.002356 | 0.234997 | 5,549 | 154 | 89 | 36.032468 | 0.851355 | 0.080735 | 0 | 0.08547 | 0 | 0 | 0.076102 | 0.016011 | 0 | 0 | 0 | 0 | 0.068376 | 1 | 0.034188 | false | 0 | 0.042735 | 0 | 0.128205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63001415862268b14afae4c4f8d960644507591c | 1,136 | py | Python | backend/api/view/curd.py | LiangTang1993/Icarus | c3a4af0f98693a08b850b47ff01091c4e884cc18 | [
"Zlib"
] | 686 | 2015-10-20T09:04:05.000Z | 2022-03-21T14:39:23.000Z | backend/api/view/curd.py | LiangTang1993/Icarus | c3a4af0f98693a08b850b47ff01091c4e884cc18 | [
"Zlib"
] | 25 | 2018-07-18T15:06:36.000Z | 2020-07-06T14:28:38.000Z | backend/api/view/curd.py | LiangTang1993/Icarus | c3a4af0f98693a08b850b47ff01091c4e884cc18 | [
"Zlib"
] | 138 | 2017-12-19T11:49:15.000Z | 2022-03-27T06:43:35.000Z | from typing import Type
from api.user_view_mixin import UserViewMixin
from crud.crud import c
from pycurd.crud.base_crud import BaseCrud
from slim.base.web import JSONResponse
from slim.retcode import RETCODE
from slim.view import CrudView
class BaseCrudView(CrudView):
crud: BaseCrud = c
is_base_class = True
def on_finish(self):
if self._route_info.handler == self.__class__.get:
if self.response.data is None:
self.finish(RETCODE.NOT_FOUND)
if self.response.data and isinstance(self.response.data, dict) and 'code' in self.response.data:
self.response = JSONResponse(200, {
'code': RETCODE.SUCCESS,
'data': self.response.data,
'msg': ''
})
def finish(self, code, data=None, msg=None):
self.response = JSONResponse(200, {
'code': code,
'data': data,
'msg': msg
})
class BaseCrudUserView(UserViewMixin, BaseCrudView):
is_base_class = True
| 28.4 | 104 | 0.631162 | 138 | 1,136 | 5.043478 | 0.347826 | 0.137931 | 0.114943 | 0.043103 | 0.181034 | 0.091954 | 0.091954 | 0 | 0 | 0 | 0 | 0.012092 | 0.272007 | 1,136 | 39 | 105 | 29.128205 | 0.829504 | 0.068662 | 0 | 0.214286 | 0 | 0 | 0.024645 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6301c0ebec0616801d4c18fe7d3dca71483ebe36 | 1,853 | py | Python | search_trees.py | mewturn/Python | 8f8285e772271349528b8b4fa7ae9df5e3c45164 | [
"MIT"
] | null | null | null | search_trees.py | mewturn/Python | 8f8285e772271349528b8b4fa7ae9df5e3c45164 | [
"MIT"
] | 1 | 2021-04-26T10:15:43.000Z | 2021-04-26T10:15:43.000Z | search_trees.py | mewturn/Python | 8f8285e772271349528b8b4fa7ae9df5e3c45164 | [
"MIT"
] | null | null | null | class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class Tree:
def __init__(self, arr):
self.root = None
for i in arr:
self.insert(self.root, Node(i))
def insert(self, root, node):
if root is None:
self.root = node
else:
if root.value < node.value:
if root.right is None:
root.right = node
else:
self.insert(root.right, node)
else:
if root.left is None:
root.left = node
else:
self.insert(root.left, node)
# In-order: Left, Root, Right
def inorder(self, node):
if node is None:
return
self.inorder(node.left)
print(node.value)
self.inorder(node.right)
# Pre-order: Root, Left, Right
def preorder(self, node):
if node is None:
return
print(node.value)
self.preorder(node.left)
self.preorder(node.right)
# Post-order: Left, Right, Root
def postorder(self, node):
if node is None:
return
self.postorder(node.left)
self.postorder(node.right)
print(node.value)
def search(self, node, val):
if node is None:
print("Does not exist")
return
if node.value == val:
print("Found", val, node.value, node)
return node.value
        if node.value > val:
            return self.search(node.left, val)
        return self.search(node.right, val)
if __name__ == "__main__":
arr = [4,3,5,1,2]
t = Tree(arr)
t.inorder(t.root)
t.postorder(t.root)
t.preorder(t.root)
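    # For the tree built from [4, 3, 5, 1, 2], the calls above print:
    #   in-order:   1 2 3 4 5
    #   post-order: 2 1 3 5 4
    #   pre-order:  4 3 1 2 5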
| 24.381579 | 49 | 0.490556 | 221 | 1,853 | 4.040724 | 0.18552 | 0.080627 | 0.035834 | 0.053751 | 0.145577 | 0.096305 | 0.096305 | 0.067189 | 0 | 0 | 0 | 0.004575 | 0.410146 | 1,853 | 75 | 50 | 24.706667 | 0.812443 | 0.046411 | 0 | 0.258621 | 0 | 0 | 0.015315 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0 | 0 | 0 | 0.275862 | 0.086207 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63041bada2c63f8f259b537c94e459861f172477 | 4,215 | py | Python | main.py | y252328/captcha-labeling-tool | 5af11b73d840e6ade0c8ef93491a2c172f9cb7f1 | [
"MIT"
] | null | null | null | main.py | y252328/captcha-labeling-tool | 5af11b73d840e6ade0c8ef93491a2c172f9cb7f1 | [
"MIT"
] | null | null | null | main.py | y252328/captcha-labeling-tool | 5af11b73d840e6ade0c8ef93491a2c172f9cb7f1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""GUI labeling tool
Notice
    Please modify the AppWindow attribute ``dir`` to point at your dataset directory.
"""
import sys
import os
from PySide2.QtGui import QPixmap, QImage, QIcon
from PySide2.QtWidgets import QApplication, QMainWindow, QFileDialog, QSizePolicy, QMenu, QMessageBox
from PySide2.QtCore import Slot, Qt, QPoint, Signal, QEvent
from layout import Ui_MainWindow
class AppWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setting = {}
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.dir = '../../captcha'
self.png_dict = self.load_csv()
for name in self.list_png():
if not name in self.png_dict:
self.png_dict[name] = ""
self.png_list = list(self.png_dict.keys())
self.png_list.sort()
self.idx = 0
self.update_img()
self.ui.lableLineEdit.installEventFilter(self)
def list_png(self):
files = os.listdir(self.dir)
png_files = list(filter(lambda name: '.png' in name, files))
png_files.sort()
return png_files
def load_csv(self, name='label.csv'):
png_dict = {}
if os.path.isfile(os.path.join(self.dir, name)):
with open(os.path.join(self.dir, name), 'r') as f:
for name, lable in map(lambda line: line.split(','), f.readlines()):
png_dict[name] = lable.strip()
return png_dict
def save_csv(self, name='label.csv'):
with open(os.path.join(self.dir, name), 'w') as f:
for name in self.png_list:
f.write(name + ',' + self.png_dict[name] + '\n')
print('saved csv')
def update_img(self):
label = self.png_dict[self.png_list[self.idx]]
self.ui.lableLineEdit.setStyleSheet("color: black;")
self.ui.lableLineEdit.setText(label)
print('show', self.idx)
self.ui.imgLabel.setText(self.png_list[self.idx])
self.ui.groupBox.setTitle(self.png_list[self.idx])
pixmap = QPixmap(os.path.join(self.dir, self.png_list[self.idx]))
self.ui.imgLabel.setPixmap(pixmap)
def eventFilter(self, source, event):
if event.type() == QEvent.KeyPress and source is self.ui.lableLineEdit:
if event.key() == Qt.Key_Up:
self.on_preBtn_clicked()
if event.key() == Qt.Key_Down:
self.on_nextBtn_clicked()
# print('key press:', (event.key(), event.text()))
return super(AppWindow, self).eventFilter(source, event)
@Slot()
def on_preBtn_clicked(self):
skip = self.ui.skipCheckBox.isChecked()
self.idx -= 1
if self.idx < 0:
self.idx = 0
return
while skip and self.png_dict[self.png_list[self.idx]] != "":
self.idx -= 1
if self.idx < 0:
self.idx = 0
return
self.update_img()
print("上一個")
@Slot()
def on_nextBtn_clicked(self):
skip = self.ui.skipCheckBox.isChecked()
self.idx += 1
if self.idx >= len(self.png_list):
self.idx = len(self.png_list)-1
return
while skip and self.png_dict[self.png_list[self.idx]] != "":
self.idx += 1
if self.idx >= len(self.png_list):
self.idx = len(self.png_list)-1
return
self.update_img()
print("下一個")
@Slot()
def on_lableLineEdit_returnPressed(self):
self.png_dict[self.png_list[self.idx]] = self.ui.lableLineEdit.text().strip().upper()
self.on_nextBtn_clicked()
print('enter')
@Slot()
def on_enterBtn_clicked(self):
self.on_lableLineEdit_returnPressed()
@Slot()
def on_saveBtn_clicked(self):
self.save_csv()
def closeEvent(self, event):
self.save_csv()
event.accept()
def main():
app = QApplication(sys.argv)
w = AppWindow()
w.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 31.931818 | 102 | 0.561329 | 524 | 4,215 | 4.364504 | 0.263359 | 0.070398 | 0.067337 | 0.059029 | 0.352864 | 0.249672 | 0.24049 | 0.219502 | 0.194141 | 0.194141 | 0 | 0.005165 | 0.311032 | 4,215 | 131 | 103 | 32.175573 | 0.782369 | 0.037485 | 0 | 0.317308 | 0 | 0 | 0.021956 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.057692 | 0 | 0.259615 | 0.048077 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63044308a09ddd0db4370dbbd06846e24239a8e4 | 24,316 | py | Python | PybulletSimulation/tripedal_walkGenerator.py | Einsbon/tripedal-robot-prjoect | 6cf686c4ba4b58c771fdcf2443d6b4869d2997c7 | [
"MIT"
] | 3 | 2020-09-19T03:35:37.000Z | 2021-11-29T08:08:38.000Z | PybulletSimulation/tripedal_walkGenerator.py | Einsbon/tripedal-robot-prjoect | 6cf686c4ba4b58c771fdcf2443d6b4869d2997c7 | [
"MIT"
] | null | null | null | PybulletSimulation/tripedal_walkGenerator.py | Einsbon/tripedal-robot-prjoect | 6cf686c4ba4b58c771fdcf2443d6b4869d2997c7 | [
"MIT"
] | null | null | null | from matplotlib.pyplot import plot
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import os
import csv
from tripedal_kinematics import TripedalKinematics
COS120 = math.cos(math.pi * 2 / 3)
SIN120 = math.sin(math.pi * 2 / 3)
COS240 = math.cos(-math.pi * 2 / 3)
SIN240 = math.sin(-math.pi * 2 / 3)
class WalkGenerator():
def __init__(self):
super().__init__()
def SetWalkParameter(self,
moveDirection: float,
bodyMovePointsCount: int,
legMovePointsCount: int,
stepLength: float,
stepHeight: float,
legXYDistanceFromCenter: float,
sit: float,
swayShift: int,
swayRadiusMin: float,
swayRadiusMax: float,
liftPush=0.4,
landPull=0.6,
damping=0,
incline=0):
# I recommend adjusting these values while checking the graph.
# This is not an algorithm created through any research, it is just an implementation of my idea.
        self._moveDirection = moveDirection  # angle of the walking direction; leg A's direction is 0, leg B's is pi * 2/3.
        # (the angle along which the feet step)
self._bodyMoveCount = bodyMovePointsCount # number of points when the legs are not floated.
        # (number of points during three-point support: all three feet on the ground while the body moves)
self._legMoveCount = legMovePointsCount # number of points when one leg is floating
        # (number of points while one foot is in the air)
self._l = stepLength # The distance of one step.
        # (the stride length)
self._h = stepHeight # The height of the step.
        # The height to lift the foot. If the value will be tuned after the motion
        # is generated (e.g., via real-time sensor feedback or another algorithm),
        # setting a height here is fine; when using this code as-is, 0 is recommended.
self._legToCenter = legXYDistanceFromCenter # The value of how far the foot is from the central axis.
# If 0, the three feet are clustered in the middle (of course not recommended).
# Increasing the value increases the distance of three feet.
        # (how far each foot sits from the central axis: 0 clusters the feet at
        # the center, and larger values spread them apart)
        self._sit = sit  # vertical (z) offset applied to every foot target
        # (controls how much the body sits down while standing and walking)
self._swayShift = swayShift # Adjust the timing of sway and foot lift.
# If this value is 1, the foot is lifted when the body is swayed to the maximum
# opposite direction of the moving foot.
# If this value is 0, the foot is floated when the body is swayed maximum.
# around 0.5 is recommended. do not set under -0.5 and over 1.5
        # (at 0 the body sway peaks when the foot lifts; at 1, when it lands; around 0.5 is recommended)
        self._liftPush = liftPush  # push the lifting foot backward when lifting the foot to gain momentum. 0.2 ~ 1.0 is recommended.
        # (increasing this pulls the foot slightly backward right after lift-off;
        # at 0 the motion is a pure sine curve, and larger values round it out)
        self._landPull = landPull  # before putting the foot down, move it farther forward and pull it back on landing.
        # (increasing this moves the foot farther forward just before touchdown;
        # at 0 the landing follows a pure sine curve, and larger values round it out;
        # 0.2 ~ 1.0 is recommended)
self._swayRadiusMin = swayRadiusMin # minimum length to sway
self._swayRadiusMax = swayRadiusMax # maximum length to sway in the opposite direction of the moving foot.
self._damping = damping # // not implemented yet
self._incline = incline # tangent angle of incline // not implemented yet
        # slated for removal later:
'''
self._swayRadiusMin
self._swayRadiusMax
'''
def InitProperty(self):
cosMov = math.cos(self._moveDirection)
sinMov = math.sin(self._moveDirection)
self._InitialPointA = [self._legToCenter, 0, 0]
rx = COS120 * self._legToCenter
ry = SIN120 * self._legToCenter
self._InitialPointB = [rx, ry, 0]
rx = COS240 * self._legToCenter
ry = SIN240 * self._legToCenter
self._InitialPointC = [rx, ry, 0]
self._cycleLength = (self._bodyMoveCount * 3 + self._legMoveCount * 3)
self._cycleCount = int(0)
self._notWalkPoitCount = (self._bodyMoveCount * 3 + self._legMoveCount * 2)
self._liftedVectorA = [0.0, 0.0, 0.0]
self._liftedVectorB = [0.0, 0.0, 0.0]
self._liftedVectorC = [0.0, 0.0, 0.0]
self._puttedVectorA = [0.0, 0.0, 0.0]
self._puttedVectorB = [0.0, 0.0, 0.0]
self._puttedVectorC = [0.0, 0.0, 0.0]
self._targetToPutVectorA = [0.0, 0.0, 0.0]
self._targetToPutVectorB = [0.0, 0.0, 0.0]
self._targetToPutVectorC = [0.0, 0.0, 0.0]
self._moveVectorA = [0.0, 0.0, 0.0]
self._moveVectorB = [0.0, 0.0, 0.0]
self._moveVectorC = [0.0, 0.0, 0.0]
self._resultVectorA = [0.0, 0.0, 0.0]
self._resultVectorB = [0.0, 0.0, 0.0]
self._resultVectorC = [0.0, 0.0, 0.0]
self._swayVector = [0.0, 0.0, self._sit]
self._swayLength = 0.0
self._dragVectorChangeSpeed = 0
self._dragVectorChangeSpeedMax = 3.0
self._dragVectorChanged = False
self._dragVectorMult = (3 * self._bodyMoveCount + 2 * self._legMoveCount) / (2 * self._bodyMoveCount +
2 * self._legMoveCount)
self._dragVectorX = 0.0
self._dragVectorY = 0.0
self._dragVectorX_target = 0.0
self._dragVectorY_target = 0.0
def MakeNextPoint(self):
isThreeSupport = None # bool
progThreeSupport = None # float
progFloatX = None # float
progDragA = None # float
progDragB = None # float
progDragC = None # float
FloatingLegVectorX = None # float
FloatingLegVectorZ = None # float
i = self._cycleCount % (self._legMoveCount + self._bodyMoveCount)
if i >= self._bodyMoveCount:
# foot lift
progFloatX = (i + 1 - self._bodyMoveCount) / self._legMoveCount # 0 ~ 1
isThreeSupport = False
else:
# three foots suport
progThreeSupport = (i + 1) / self._bodyMoveCount # 0 ~ 1
isThreeSupport = True
cyclecountA = (self._cycleCount + self._bodyMoveCount * 2 + self._legMoveCount * 2) % self._cycleLength
cyclecountB = (self._cycleCount + self._bodyMoveCount + self._legMoveCount) % self._cycleLength
cyclecountC = (self._cycleCount) % self._cycleLength
cosMov = math.cos(self._moveDirection)
sinMov = math.sin(self._moveDirection)
difX = self._dragVectorX_target - self._dragVectorX
difY = self._dragVectorY_target - self._dragVectorY
distXY = math.sqrt(difX * difX + difY * difY)
if (isThreeSupport == True):
if progThreeSupport < 0.7 and progThreeSupport > 0.3:
dragVecX = -self._l * self._dragVectorMult * cosMov
dragVecY = -self._l * self._dragVectorMult * sinMov
# if target drag vector changed
if (self._dragVectorX_target != dragVecX or self._dragVectorY_target != dragVecY):
#change target drag vector
self._dragVectorX_target = -self._l * self._dragVectorMult * cosMov
self._dragVectorY_target = -self._l * self._dragVectorMult * sinMov
difX = self._dragVectorX_target - self._dragVectorX
difY = self._dragVectorY_target - self._dragVectorY
distXY = math.sqrt(difX * difX + difY * difY)
if (distXY > 0):
count = math.ceil(
(distXY / self._dragVectorChangeSpeedMax) / (self._bodyMoveCount + self._legMoveCount))
self._dragVectorChangeSpeed = distXY / ((self._bodyMoveCount + self._legMoveCount) * count)
else:
self._dragVectorChangeSpeed = 0.0
else:
t = progFloatX # 0 ~ 1
sin_tpi = math.sin(t * math.pi)
x = (2 * t + (1 - t) * self._liftPush * -sin_tpi + t * self._landPull * sin_tpi) / 2 #0~1
FloatingLegVectorX = x
FloatingLegVectorZ = sin_tpi * self._h
if (distXY > 0):
if (distXY >= self._dragVectorChangeSpeed):
if difX != 0.0:
vecx = self._dragVectorChangeSpeed * difX / distXY
self._dragVectorX = self._dragVectorX + vecx
if difY != 0.0:
vecy = self._dragVectorChangeSpeed * difY / distXY
self._dragVectorY = self._dragVectorY + vecy
else:
self._dragVectorX = self._dragVectorX_target
self._dragVectorY = self._dragVectorY_target
# A
if (cyclecountA < self._notWalkPoitCount):
# drag
progDragA = float(cyclecountA + 1) / self._notWalkPoitCount
self._moveVectorA[0] = self._moveVectorA[0] + self._dragVectorX / self._notWalkPoitCount
self._moveVectorA[1] = self._moveVectorA[1] + self._dragVectorY / self._notWalkPoitCount
self._moveVectorA[2] = self._sit
if (progDragA == 1.0):
# ready to float
self._liftedVectorA[0] = self._moveVectorA[0]
self._liftedVectorA[1] = self._moveVectorA[1]
self._targetToPutVectorA[0] = -(self._dragVectorX / 2)
self._targetToPutVectorA[1] = -(self._dragVectorY / 2)
else:
# float
progFloatA = progFloatX
self._moveVectorA[0] = self._targetToPutVectorA[0] * FloatingLegVectorX + (
1 - FloatingLegVectorX) * self._liftedVectorA[0]
self._moveVectorA[1] = self._targetToPutVectorA[1] * FloatingLegVectorX + (
1 - FloatingLegVectorX) * self._liftedVectorA[1]
self._moveVectorA[2] = self._sit + FloatingLegVectorZ
if (progFloatX == 1.0):
# put
self._puttedVectorA[0] = self._moveVectorA[0]
self._puttedVectorA[1] = self._moveVectorA[1]
self._puttedVectorA[2] = self._moveVectorA[2]
# B
if (cyclecountB < self._notWalkPoitCount):
# drag
progDragB = float(cyclecountB + 1) / self._notWalkPoitCount
self._moveVectorB[0] = self._moveVectorB[0] + self._dragVectorX / self._notWalkPoitCount
self._moveVectorB[1] = self._moveVectorB[1] + self._dragVectorY / self._notWalkPoitCount
self._moveVectorB[2] = self._sit
if (progDragB == 1.0):
# ready to float
self._liftedVectorB[0] = self._moveVectorB[0]
self._liftedVectorB[1] = self._moveVectorB[1]
self._targetToPutVectorB[0] = -(self._dragVectorX / 2)
self._targetToPutVectorB[1] = -(self._dragVectorY / 2)
else:
# float
progFloatB = progFloatX
self._moveVectorB[0] = self._targetToPutVectorB[0] * FloatingLegVectorX + (
1 - FloatingLegVectorX) * self._liftedVectorB[0]
self._moveVectorB[1] = self._targetToPutVectorB[1] * FloatingLegVectorX + (
1 - FloatingLegVectorX) * self._liftedVectorB[1]
self._moveVectorB[2] = self._sit + FloatingLegVectorZ
if (progFloatX == 1.0):
# put
self._puttedVectorB[0] = self._moveVectorB[0]
self._puttedVectorB[1] = self._moveVectorB[1]
self._puttedVectorB[2] = self._moveVectorB[2]
# C
if (cyclecountC < self._notWalkPoitCount):
# drag
progDragC = float(cyclecountC + 1) / self._notWalkPoitCount
self._moveVectorC[0] = self._moveVectorC[0] + self._dragVectorX / self._notWalkPoitCount
self._moveVectorC[1] = self._moveVectorC[1] + self._dragVectorY / self._notWalkPoitCount
self._moveVectorC[2] = self._sit
if (progDragC == 1.0):
# ready to float
self._liftedVectorC[0] = self._moveVectorC[0]
self._liftedVectorC[1] = self._moveVectorC[1]
self._targetToPutVectorC[0] = -(self._dragVectorX / 2)
self._targetToPutVectorC[1] = -(self._dragVectorY / 2)
else:
# float
progFloatC = progFloatX
self._moveVectorC[0] = self._targetToPutVectorC[0] * FloatingLegVectorX + (
1 - FloatingLegVectorX) * self._liftedVectorC[0]
self._moveVectorC[1] = self._targetToPutVectorC[1] * FloatingLegVectorX + (
1 - FloatingLegVectorX) * self._liftedVectorC[1]
self._moveVectorC[2] = self._sit + FloatingLegVectorZ
if (progFloatX == 1.0):
# put
self._puttedVectorC[0] = self._moveVectorC[0]
self._puttedVectorC[1] = self._moveVectorC[1]
self._puttedVectorC[2] = self._moveVectorC[2]
# sway vector
i = self._cycleCount % (self._legMoveCount + self._bodyMoveCount)
t = -0.5 + (i + 1 + self._swayShift * self._legMoveCount) / (self._legMoveCount + self._bodyMoveCount)
if t < 0:
t = t + 1
tmpX = -self._swayRadiusMin / 2 - math.sin(math.pi * t) * (self._swayRadiusMax - self._swayRadiusMin / 2)
tmpY = math.sqrt(3) * self._swayRadiusMin / 2 - t * math.sqrt(3) * self._swayRadiusMin - math.sin(
math.pi * 2 * t) * ((self._swayRadiusMax - (0.5 + 3 / math.pi) * self._swayRadiusMin) /
(2 * math.sqrt(3)))
#rotation = -math.pi * 2 / 3
tmpSwayAX = COS240 * tmpX - SIN240 * tmpY
tmpSwayAY = SIN240 * tmpX + COS240 * tmpY
elif t > 1:
t = t - 1
tmpX = -self._swayRadiusMin / 2 - math.sin(math.pi * t) * (self._swayRadiusMax - self._swayRadiusMin / 2)
tmpY = math.sqrt(3) * self._swayRadiusMin / 2 - t * math.sqrt(3) * self._swayRadiusMin - math.sin(
math.pi * 2 * t) * ((self._swayRadiusMax - (0.5 + 3 / math.pi) * self._swayRadiusMin) /
(2 * math.sqrt(3)))
#rotation = math.pi * 2 / 3
tmpSwayAX = COS120 * tmpX - SIN120 * tmpY
tmpSwayAY = SIN120 * tmpX + COS120 * tmpY
else:
tmpSwayAX = -self._swayRadiusMin / 2 - math.sin(
math.pi * t) * (self._swayRadiusMax - self._swayRadiusMin / 2)
tmpSwayAY = math.sqrt(3) * self._swayRadiusMin / 2 - t * math.sqrt(3) * self._swayRadiusMin - math.sin(
math.pi * 2 * t) * ((self._swayRadiusMax - (0.5 + 3 / math.pi) * self._swayRadiusMin) /
(2 * math.sqrt(3)))
#0 before_A_move
if self._cycleCount < self._bodyMoveCount * 1 + self._legMoveCount * 1:
self._swayVector = np.array([tmpSwayAX, tmpSwayAY, 0])
#2 before_B_move
elif self._cycleCount < self._bodyMoveCount * 2 + self._legMoveCount * 2:
self._swayVector = np.array(
[COS120 * tmpSwayAX - SIN120 * tmpSwayAY, SIN120 * tmpSwayAX + COS120 * tmpSwayAY, 0])
#4 before_C_move
else:
self._swayVector = np.array(
[COS240 * tmpSwayAX - SIN240 * tmpSwayAY, SIN240 * tmpSwayAX + COS240 * tmpSwayAY, 0])
self._resultVectorA[0] = self._moveVectorA[0] + self._InitialPointA[0] - self._swayVector[0]
self._resultVectorA[1] = self._moveVectorA[1] + self._InitialPointA[1] - self._swayVector[1]
self._resultVectorA[2] = self._moveVectorA[2] + self._InitialPointA[2] - 0
self._resultVectorB[0] = self._moveVectorB[0] + self._InitialPointB[0] - self._swayVector[0]
self._resultVectorB[1] = self._moveVectorB[1] + self._InitialPointB[1] - self._swayVector[1]
self._resultVectorB[2] = self._moveVectorB[2] + self._InitialPointB[2] - 0
self._resultVectorC[0] = self._moveVectorC[0] + self._InitialPointC[0] - self._swayVector[0]
self._resultVectorC[1] = self._moveVectorC[1] + self._InitialPointC[1] - self._swayVector[1]
self._resultVectorC[2] = self._moveVectorC[2] + self._InitialPointC[2] - 0
self._cycleCount = self._cycleCount + 1
if self._cycleCount >= self._cycleLength:
self._cycleCount = 0
return self._resultVectorA, self._resultVectorB, self._resultVectorC, self._swayVector
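# Illustrative per-tick driving loop (a sketch; mapping the returned foot
# targets to joint angles via TripedalKinematics is an assumption based on
# the import above, and its method names may differ):
#
#   wg = WalkGenerator()
#   wg.SetWalkParameter(...)   # see ShowTestGraphAnimation below for values
#   wg.InitProperty()
#   for _ in range(wg._cycleLength):
#       a, b, c, sway = wg.MakeNextPoint()
#       # feed foot targets a, b, c and the body sway vector to the IK here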
def ShowTestGraphAnimation():
wg = WalkGenerator()
wg.SetWalkParameter(moveDirection=0,
bodyMovePointsCount=10,
legMovePointsCount=7,
stepLength=50,
stepHeight=30,
legXYDistanceFromCenter=70,
sit=60,
swayShift=0.5,
swayRadiusMin=20,
swayRadiusMax=26,
liftPush=0.6,
landPull=0.6,
damping=0,
incline=0)
#wg.BeforeMakeOnePoint()
wg.InitProperty()
queue = []
fig = plt.figure(1)
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim([-80, 80])
ax.set_ylim([-80, 80])
ax.set_proj_type('ortho')
dv, = ax.plot(wg._dragVectorX / 2, wg._dragVectorY / 2, float(wg._sit), 'x', color='red')
dvt, = ax.plot(wg._dragVectorX_target / 2, wg._dragVectorY_target / 2, float(wg._sit), '+', color='purple')
plt.pause(1)
for _ in range(wg._cycleLength - 10):
a, b, c, s = wg.MakeNextPoint()
aa, = ax.plot(a[0], a[1], a[2], '>', color='red')
bb, = ax.plot(b[0], b[1], b[2], '^', color='green')
cc, = ax.plot(c[0], c[1], c[2], 'v', color='blue')
ss, = ax.plot(s[0], s[1], wg._sit, '*', color='yellow')
queue.append(aa)
queue.append(bb)
queue.append(cc)
queue.append(ss)
plt.pause(0.001)
for _ in range(40):
a, b, c, s = wg.MakeNextPoint()
aa, = ax.plot(a[0], a[1], a[2], '>', color='red')
bb, = ax.plot(b[0], b[1], b[2], '^', color='green')
cc, = ax.plot(c[0], c[1], c[2], 'v', color='blue')
ss, = ax.plot(s[0], s[1], wg._sit, '*', color='yellow')
queue.append(aa)
queue.append(bb)
queue.append(cc)
queue.append(ss)
da = queue.pop(0)
db = queue.pop(0)
dc = queue.pop(0)
ds = queue.pop(0)
da.remove()
db.remove()
dc.remove()
ds.remove()
dv.remove()
dv, = ax.plot(wg._dragVectorX / 2, wg._dragVectorY / 2, float(wg._sit), 'x', color='red')
dvt.remove()
dvt, = ax.plot(wg._dragVectorX_target / 2, wg._dragVectorY_target / 2, float(wg._sit), '+', color='purple')
plt.pause(0.001)
wg._l = 30
wg._moveDirection = 1
print('''
wg._l = 30
wg._moveDirection = 1''')
#while (True):
for _ in range(150):
a, b, c, s = wg.MakeNextPoint()
aa, = ax.plot(a[0], a[1], a[2], '>', color='red')
bb, = ax.plot(b[0], b[1], b[2], '^', color='green')
cc, = ax.plot(c[0], c[1], c[2], 'v', color='blue')
ss, = ax.plot(s[0], s[1], wg._sit, '*', color='yellow')
queue.append(aa)
queue.append(bb)
queue.append(cc)
queue.append(ss)
da = queue.pop(0)
db = queue.pop(0)
dc = queue.pop(0)
ds = queue.pop(0)
da.remove()
db.remove()
dc.remove()
ds.remove()
dv.remove()
dv, = ax.plot(wg._dragVectorX / 2, wg._dragVectorY / 2, float(wg._sit), 'x', color='red')
dvt.remove()
dvt, = ax.plot(wg._dragVectorX_target / 2, wg._dragVectorY_target / 2, float(wg._sit), '+', color='purple')
plt.pause(0.001)
wg._l = 40
wg._moveDirection = -1.5
print('''
wg._l = 40
wg._moveDirection = -1.5''')
#while (True):
for _ in range(150):
a, b, c, s = wg.MakeNextPoint()
aa, = ax.plot(a[0], a[1], a[2], '>', color='red')
bb, = ax.plot(b[0], b[1], b[2], '^', color='green')
cc, = ax.plot(c[0], c[1], c[2], 'v', color='blue')
ss, = ax.plot(s[0], s[1], wg._sit, '*', color='yellow')
queue.append(aa)
queue.append(bb)
queue.append(cc)
queue.append(ss)
da = queue.pop(0)
db = queue.pop(0)
dc = queue.pop(0)
ds = queue.pop(0)
da.remove()
db.remove()
dc.remove()
ds.remove()
dv.remove()
dv, = ax.plot(wg._dragVectorX / 2, wg._dragVectorY / 2, float(wg._sit), 'x', color='red')
dvt.remove()
dvt, = ax.plot(wg._dragVectorX_target / 2, wg._dragVectorY_target / 2, float(wg._sit), '+', color='purple')
plt.pause(0.001)
wg._l = 0
wg._moveDirection = -1.5
print('''
wg._l = 0
wg._moveDirection = -1.5''')
#while (True):
for _ in range(150):
a, b, c, s = wg.MakeNextPoint()
aa, = ax.plot(a[0], a[1], a[2], '>', color='red')
bb, = ax.plot(b[0], b[1], b[2], '^', color='green')
cc, = ax.plot(c[0], c[1], c[2], 'v', color='blue')
ss, = ax.plot(s[0], s[1], wg._sit, '*', color='yellow')
queue.append(aa)
queue.append(bb)
queue.append(cc)
queue.append(ss)
da = queue.pop(0)
db = queue.pop(0)
dc = queue.pop(0)
ds = queue.pop(0)
da.remove()
db.remove()
dc.remove()
ds.remove()
dv.remove()
dv, = ax.plot(wg._dragVectorX / 2, wg._dragVectorY / 2, float(wg._sit), 'x', color='red')
dvt.remove()
dvt, = ax.plot(wg._dragVectorX_target / 2, wg._dragVectorY_target / 2, float(wg._sit), '+', color='purple')
plt.pause(0.001)
def SaveCSV():
wg = WalkGenerator()
wg.SetWalkParameter(moveDirection=0.5,
bodyMovePointsCount=10,
legMovePointsCount=7,
stepLength=50,
stepHeight=1,
legXYDistanceFromCenter=86,
sit=60,
swayShift=0.6,
swayRadiusMin=18,
swayRadiusMax=24,
liftPush=0.6,
landPull=0.6,
damping=0,
incline=0)
wg.InitProperty()
file = open(os.path.abspath(os.path.dirname(__file__)) + '/TripedalGaitPoints.csv',
'w',
encoding='utf-8',
newline='')
csvfile = csv.writer(file)
# Skip the first 102 points to settle, then record 51 points (one full
# gait cycle at these parameter values).
for _ in range(102):
a, b, c, _ = wg.MakeNextPoint()
for _ in range(51):
a, b, c, _ = wg.MakeNextPoint()
# Concatenate the three leg vectors into one 9-column row; a bare
# a + b + c would sum the numpy arrays element-wise instead.
csvfile.writerow(list(a) + list(b) + list(c))
file.close()
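def _load_gait_points(path='TripedalGaitPoints.csv'):
    """Editor's sketch (hypothetical helper, not in the original file):
    read back the rows written by SaveCSV(); each row holds nine floats,
    the (x, y, z) tip coordinates of legs A, B and C for one gait point."""
    with open(path, newline='') as f:
        return [[float(v) for v in row] for row in csv.reader(f)]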
if __name__ == "__main__":
ShowTestGraphAnimation()
#SaveCSV()
| 43.037168 | 135 | 0.524099 | 2,648 | 24,316 | 4.654834 | 0.151057 | 0.014117 | 0.01509 | 0.014928 | 0.5086 | 0.365812 | 0.31251 | 0.277057 | 0.277057 | 0.268457 | 0 | 0.04326 | 0.361161 | 24,316 | 564 | 136 | 43.113475 | 0.750225 | 0.11219 | 0 | 0.419431 | 0 | 0 | 0.016193 | 0.001102 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014218 | false | 0 | 0.018957 | 0 | 0.037915 | 0.007109 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6304eabc779865ff07c835a6353f9805644f6fbc | 2,091 | py | Python | coosa_control.py | MatthewScholefield/coosa-control | e6f7625290eacc8c6cfdd02b650f20a668307931 | [
"MIT"
] | 1 | 2020-12-08T22:29:53.000Z | 2020-12-08T22:29:53.000Z | coosa_control.py | MatthewScholefield/coosa-control | e6f7625290eacc8c6cfdd02b650f20a668307931 | [
"MIT"
] | 1 | 2022-02-04T03:15:34.000Z | 2022-02-04T03:28:45.000Z | coosa_control.py | MatthewScholefield/coosa-control | e6f7625290eacc8c6cfdd02b650f20a668307931 | [
"MIT"
] | null | null | null | from os import makedirs
import socket as sk
from appdirs import user_config_dir
from argparse import ArgumentParser
from ast import literal_eval
from os.path import isfile, join, dirname
def main():
parser = ArgumentParser(description='Lights control script for coosa smart plugs')
parser.add_argument('command', nargs='?', choices=['on', 'off'])
parser.add_argument('-i', '--ip-address', help='IP address of device')
parser.add_argument('-s', '--save-params', action='store_true', help='Save params as default')
parser.add_argument('-e', '--enable-data', help='File with data to send to enable plug')
parser.add_argument('-d', '--disable-data', help='File with data to send to disable plug')
args = parser.parse_args()
params_file = join(user_config_dir('coosa-control'), 'params.dat')
if isfile(params_file):
with open(params_file) as f:
params = literal_eval(f.read())
else:
params = {}
params = {k: v or params.get(k) for k, v in vars(args).items()}
address = params['ip_address']
enable = params['enable_data']
disable = params['disable_data']
if not enable or not disable or not address:
# parser.error() raises SystemExit itself, so no explicit raise is needed
parser.error('Specify --ip-address, --enable-data, and --disable-data, or save params first.')
if not args.command and not args.save_params:
parser.error('Specify a command or use --save-params')
if isinstance(enable, str):
with open(enable, 'rb') as f:
enable = params['enable_data'] = f.read()
if isinstance(disable, str):
with open(disable, 'rb') as f:
disable = params['disable_data'] = f.read()
if args.command:
sock = sk.socket(sk.AF_INET, sk.SOCK_STREAM)
sock.settimeout(10)
sock.connect((address, 6668))
sock.sendall(enable if args.command == 'on' else disable)
if args.save_params:
makedirs(dirname(params_file), exist_ok=True)
with open(params_file, 'w') as f:
f.write(str(params))
if __name__ == '__main__':
main()
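# Editor's note (illustrative; values are hypothetical): the saved params file
# is simply the repr of the merged argument dict, read back via literal_eval():
# {'command': None, 'ip_address': '192.168.0.50', 'save_params': True,
# 'enable_data': b'\x00\x01...', 'disable_data': b'\x00\x02...'}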
| 34.278689 | 103 | 0.648972 | 292 | 2,091 | 4.523973 | 0.356164 | 0.04542 | 0.064345 | 0.024224 | 0.042392 | 0.042392 | 0.042392 | 0.042392 | 0 | 0 | 0 | 0.004274 | 0.216643 | 2,091 | 60 | 104 | 34.85 | 0.802198 | 0 | 0 | 0 | 0 | 0.021739 | 0.217121 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.130435 | 0 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63055f6a15786be50aacfe014dc4dc3b9d610661 | 2,669 | py | Python | pybinding/utils/time.py | lise1020/pybinding | 921d5c2ac0ecc0ef317ba28b0bf68899ea30709a | [
"BSD-2-Clause"
] | 159 | 2016-01-20T17:40:48.000Z | 2022-03-24T06:08:55.000Z | pybinding/utils/time.py | deilynazar/pybinding | ec1128aaa84a1b43a74fb970479ce4544bd63179 | [
"BSD-2-Clause"
] | 36 | 2016-11-01T17:15:12.000Z | 2022-03-08T14:31:51.000Z | pybinding/utils/time.py | deilynazar/pybinding | ec1128aaa84a1b43a74fb970479ce4544bd63179 | [
"BSD-2-Clause"
] | 57 | 2016-04-23T22:12:01.000Z | 2022-03-08T12:33:04.000Z | import time
__all__ = ['tic', 'toc', 'timed', 'pretty_duration']
_tic_times = []
def tic():
"""Set a start time"""
global _tic_times
_tic_times.append(time.time())
def toc(message=""):
"""Print the elapsed time from the last :func:`.tic`
Parameters
----------
message : str
Print this in front of the elapsed time.
"""
if not _tic_times:
raise RuntimeError("Called toc() without a tic()")
if message:
print(message, end=" ")
print(pretty_duration(time.time() - _tic_times.pop()))
class _Timed:
def __init__(self, message=""):
self.message = message
def __enter__(self):
self._enter_time = time.time()
return self
def __exit__(self, *_):
self.elapsed = time.time() - self._enter_time
if self.message:
print(self.message, self)
def __str__(self):
return pretty_duration(self.elapsed)
def timed(message=""):
"""Context manager which times its code block
Parameters
----------
message : str
Message to print on block exit, followed by the elapsed time.
"""
return _Timed(message)
def pretty_duration(seconds):
"""Return a pretty duration string
Parameters
----------
seconds : float
Duration in seconds
Examples
--------
>>> pretty_duration(2.1e-6)
'0.00ms'
>>> pretty_duration(2.1e-5)
'0.02ms'
>>> pretty_duration(2.1e-4)
'0.21ms'
>>> pretty_duration(2.1e-3)
'2.1ms'
>>> pretty_duration(2.1e-2)
'21ms'
>>> pretty_duration(2.1e-1)
'0.21s'
>>> pretty_duration(2.1)
'2.10s'
>>> pretty_duration(12.1)
'12.1s'
>>> pretty_duration(22.1)
'22s'
>>> pretty_duration(62.1)
'1:02'
>>> pretty_duration(621.1)
'10:21'
>>> pretty_duration(6217.1)
'1:43:37'
"""
milliseconds = seconds * 1000
if milliseconds < 1:
return "{:.2f}ms".format(milliseconds)
elif milliseconds < 10:
return "{:.1f}ms".format(milliseconds)
elif milliseconds < 100:
return "{:.0f}ms".format(milliseconds)
elif seconds < 10:
return "{:.2f}s".format(seconds)
elif seconds < 20:
return "{:.1f}s".format(seconds)
elif seconds < 60:
return "{:.0f}s".format(seconds)
else:
minutes = seconds // 60
seconds = int(seconds - minutes * 60)
if minutes < 60:
return "{minutes:.0f}:{seconds:02}".format(**locals())
else:
hours = minutes // 60
minutes = int(minutes - hours * 60)
return "{hours:.0f}:{minutes:02}:{seconds:02}".format(**locals())
| 23.008621 | 77 | 0.564631 | 323 | 2,669 | 4.501548 | 0.306502 | 0.163686 | 0.072215 | 0.070151 | 0.110041 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059037 | 0.276508 | 2,669 | 115 | 78 | 23.208696 | 0.693941 | 0.311353 | 0 | 0.040816 | 0 | 0 | 0.098788 | 0.038182 | 0 | 0 | 0 | 0 | 0 | 1 | 0.163265 | false | 0 | 0.020408 | 0.020408 | 0.428571 | 0.061224 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6306db77973f46fce4f118fc50f83d8672808f49 | 948 | py | Python | verres/operation/masking.py | csxeba/Verres | 04230d22b7791f84d86b9eb2272a6314a27580ed | [
"MIT"
] | null | null | null | verres/operation/masking.py | csxeba/Verres | 04230d22b7791f84d86b9eb2272a6314a27580ed | [
"MIT"
] | null | null | null | verres/operation/masking.py | csxeba/Verres | 04230d22b7791f84d86b9eb2272a6314a27580ed | [
"MIT"
] | null | null | null | import cv2
import numpy as np
def decode_poly(poly, shape):
full_mask = np.zeros(shape, dtype="uint8")
pts = [np.round(np.array(p).reshape(-1, 2)).astype(int) for p in poly]
return cv2.fillPoly(full_mask, pts, color=1).astype(bool)
def decode_rle(rle, shape):
full_mask = np.zeros(np.prod(shape), dtype=bool)
fill = False
start = 0
for num in rle["counts"]:
end = start + num
full_mask[start:end] = fill
fill = not fill
start = end
return full_mask.reshape(shape[::-1]).T
def mask_from_annotation(annotation, image_shape):
return mask_from_representation(annotation["segmentation"], image_shape)
def mask_from_representation(segmentation_repr, image_shape):
if isinstance(segmentation_repr, list):
return decode_poly(segmentation_repr, image_shape).astype(bool)
elif "counts" in segmentation_repr:
return decode_rle(segmentation_repr, image_shape)
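if __name__ == "__main__":
    # Editor's sketch (hypothetical data): decode a tiny run-length mask.
    # counts alternate off/on runs starting with "off": 2 off, 3 on, 4 off.
    demo = decode_rle({"counts": [2, 3, 4]}, (3, 3))
    print(demo.astype(int))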
| 29.625 | 76 | 0.697257 | 135 | 948 | 4.711111 | 0.37037 | 0.062893 | 0.099057 | 0.122642 | 0.062893 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01043 | 0.190928 | 948 | 31 | 77 | 30.580645 | 0.818774 | 0 | 0 | 0 | 0 | 0 | 0.030591 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.086957 | 0.043478 | 0.478261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6307e59604426db255414796ae8064f521aef865 | 21,028 | py | Python | PlaylistDatabase.py | kevinrigney/PlaylistDatabase | 283718409bc69d5cc91a7cf81a3c32c709ce25ad | [
"MIT"
] | null | null | null | PlaylistDatabase.py | kevinrigney/PlaylistDatabase | 283718409bc69d5cc91a7cf81a3c32c709ce25ad | [
"MIT"
] | null | null | null | PlaylistDatabase.py | kevinrigney/PlaylistDatabase | 283718409bc69d5cc91a7cf81a3c32c709ce25ad | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import mysql.connector as mysql
import datetime
from math import floor
from threading import RLock
from configparser import ConfigParser
from ast import literal_eval
class PlaylistDatabase():
'''
This database is designed to manage songs played by an
internet radio station. It stores the web address of the station,
some details about it, and the playlist of songs.
The playlist includes a link to a youtube video of the song.
'''
def _init_database_schema(self,commit=True):
'''
!!! ALL EXISTING DATA IS LOST WHEN USING THIS FUNCTION !!!
Initialize the database. This consists of:
* Dropping all relevant tables.
* Creating new empty tables.
'''
print('Dropping...')
try:
self._cur.execute('drop database PlaylistDB')
except mysql.errors.DatabaseError:
#print('No database exists.')
pass
self._cur.execute('create database PlaylistDB')
self._cur.execute('use PlaylistDB')
self._cur.execute('''CREATE TABLE IF NOT EXISTS Artist (
id INTEGER NOT NULL AUTO_INCREMENT UNIQUE,
artist_name VARCHAR(256) UNIQUE NOT NULL,
PRIMARY KEY (id),
KEY (artist_name)
)''')
self._cur.execute('''CREATE TABLE IF NOT EXISTS Album (
id INTEGER NOT NULL AUTO_INCREMENT UNIQUE,
album_name VARCHAR(256) NOT NULL,
artist_id INTEGER NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY (artist_id) REFERENCES Artist(id) ON UPDATE CASCADE,
UNIQUE(album_name,artist_id)
)''')
self._cur.execute('''CREATE TABLE IF NOT EXISTS Track (
id INTEGER NOT NULL AUTO_INCREMENT UNIQUE,
track_name VARCHAR(256) NOT NULL,
youtube_link TEXT,
filesystem_link TEXT,
album_id INTEGER NOT NULL,
artist_id INTEGER NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY (album_id) REFERENCES Album(id) ON UPDATE CASCADE,
FOREIGN KEY (artist_id) REFERENCES Artist(id) ON UPDATE CASCADE ,
KEY (track_name),
UNIQUE(track_name,album_id,artist_id)
)''')
self._cur.execute('''CREATE TABLE IF NOT EXISTS Station (
id INTEGER NOT NULL AUTO_INCREMENT UNIQUE,
station_name VARCHAR(256) NOT NULL UNIQUE,
web_address TEXT,
ignore_artists TEXT,
ignore_titles TEXT,
youtube_playlist_id TEXT,
active BOOL NOT NULL,
PRIMARY KEY (id),
KEY (station_name)
)''')
self._cur.execute('''CREATE TABLE IF NOT EXISTS Playlist (
id INTEGER NOT NULL AUTO_INCREMENT UNIQUE,
track_id INTEGER NOT NULL,
station_id INTEGER NOT NULL,
play_time DATETIME NOT NULL,
PRIMARY KEY (id),
KEY (station_id),
FOREIGN KEY (track_id) REFERENCES Track(id) ON UPDATE CASCADE,
FOREIGN KEY (station_id) REFERENCES Station(id) ON UPDATE CASCADE,
UNIQUE(track_id,station_id,play_time)
)''')
if commit:
self._conn.commit()
def _get_all_stations(self):
self._cur.execute('''SELECT * from Station''')
stations = self._cur.fetchall()
return stations
def _get_station_id_from_name(self,name):
self._cur.execute('''SELECT Station.id from Station where Station.station_name = %s''',(name,))
try:
station_id = self._cur.fetchone()[0]
except TypeError:
# LookupError seems better here
raise LookupError('Station: ' + str(name) + ' could not be found.')
#print('station_id: ' + str(station_id))
return station_id
def _make_artist(self,name,get_id=True,commit=True):
'''
Create an artist in the table.
For artists we only have a name.
'''
self._cur.execute('''
INSERT IGNORE INTO Artist(artist_name)
VALUES ( %s )''', (name,)
)
# If they're doing a bunch of makes they might not want
# to commit after each one
if commit:
self._conn.commit()
if get_id:
self._cur.execute('''
SELECT Artist.id FROM Artist WHERE
Artist.artist_name=%s''',(name,))
return self._cur.fetchone()[0]
def _make_album(self,artist_id,album,get_id=True,commit=True):
'''
Create an album in the table.
'''
# Get the artist id
self._cur.execute('''
INSERT IGNORE INTO Album(album_name,artist_id)
VALUES ( %s, %s )''', (album,artist_id)
)
# If they're doing a bunch of makes they might not want
# to commit after each one
if commit:
self._conn.commit()
if get_id:
self._cur.execute('''
SELECT Album.id FROM Album WHERE
Album.album_name=%s AND Album.artist_id=%s
''',(album,artist_id))
return self._cur.fetchone()[0]
def _make_track(self,name,album_id,artist_id,yt_link='',fs_link='',get_id=True,commit=True):
'''
Given a track name, ablum ID,and an artist ID, make a track in the
'Track' table. Optionally a youtube URL or filesystem location can also be specified.
'''
# We use ON DUPLICATE KEY UPDATE because we may be updating a track
# with a new youtube or filesystem link.
self._cur.execute('''
INSERT INTO Track (track_name,youtube_link,filesystem_link,album_id,artist_id)
VALUES( %s, %s, %s, %s, %s ) ON DUPLICATE KEY UPDATE
youtube_link=VALUES(youtube_link),filesystem_link=VALUES(filesystem_link)''',
(name,yt_link,fs_link,album_id,artist_id)
)
# If they're doing a bunch of makes they might not want
# to commit after each one
if commit:
self._conn.commit()
if get_id:
self._cur.execute('''
SELECT Track.id FROM Track WHERE
Track.track_name=%s AND
Track.youtube_link=%s AND
Track.filesystem_link=%s AND
Track.album_id=%s AND
Track.artist_id=%s''',(name,yt_link,fs_link,album_id,artist_id))
return self._cur.fetchone()[0]
def _add_playlist_entry(self,station_id,track_id,play_time,commit=True):
'''
Given a station ID, track ID, and a play time (a string date)
create a new row in the corresponding playlist table
'''
# No 'INSERT OR REPLACE INTO' because this should be unique based on the play times
self._cur.execute('''
INSERT INTO Playlist (track_id,station_id,play_time)
VALUES (%s, %s, %s)
''', (track_id,station_id,play_time)
)
# If they're doing a bunch of makes they might not want
# to commit after each one
if commit:
self._conn.commit()
return self._cur.lastrowid
#
# BEGIN PUBLIC FUNCTIONS
#
def create_station(self,station_name,web_address,ignore_artists=[],ignore_titles=[],youtube_playlist_id='',get_id=True,commit=True):
'''
Create a station and associated playlist
'''
with self._lock:
# Create a new playlist to use for this station
#playlist_name = self._make_playlist(station_name)
# v2 will use a different format for this... Probably another table?
ignore_artists = str(ignore_artists)
ignore_titles = str(ignore_titles)
#print(playlist_name)
self._cur.execute('''
INSERT IGNORE INTO Station(station_name,web_address,ignore_artists,ignore_titles,youtube_playlist_id,active)
VALUES ( %s, %s, %s, %s, %s, %s )''', (station_name,web_address,ignore_artists,ignore_titles,youtube_playlist_id,'true')
)
if commit:
self._conn.commit()
if get_id:
self._cur.execute('''
SELECT Station.id FROM Station WHERE
station_name=%s''',(station_name,))
return self._cur.fetchone()[0]
def add_track_to_station_playlist(self,station_name,artist,album,track,date,youtube_link='',commit = True):
'''
This public function takes a station common name
and a tuple representing the tracks data. It looks up the
playlist, creates an artist (if necessary), creates a
track (if necessary), and adds the track to the playlist
'''
with self._lock:
# This might happen. But upstream from here we should really be
# catching stuff like this
if artist == '' or track == '':
#print('Skipped')
return None
# Make a short link
youtube_link = youtube_link.replace('https://www.youtube.com/watch?v=','https://youtu.be/')
# Now that we have the data...
# Loop up the station's ID
station_id = self._get_station_id_from_name(station_name)
#print('playlist_id is :'+ playlist_id)
# Make (or don't) the artist
artist_id = self._make_artist(artist,commit=commit)
# Make (or don't) the album
album_id = self._make_album(artist_id,album,commit=commit)
# Make (or don't) a track
track_id = self._make_track(track,album_id,artist_id,youtube_link,commit=commit)
# Format the date as the string MySQL's DATETIME column expects;
# MySQL can still search and sort on this representation.
#date_ms = int(floor(date.microsecond/1000))
date = date.strftime('%Y-%m-%d %H:%M:%S.%f')
# Now that we have the data we can make an entry
return self._add_playlist_entry(station_id,track_id,date,commit=commit)
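# Editor's sketch of a typical call (values are hypothetical):
# db.add_track_to_station_playlist('MyStation', 'Some Artist', 'Some Album',
# 'Some Track', datetime.datetime.now(), 'https://youtu.be/abc123')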
def get_latest_station_tracks(self,station_name,num_tracks=1):
'''
Get a number of tracks from a station. Order from newest
to oldest.
'''
with self._lock:
station_id = self._get_station_id_from_name(station_name)
self._cur.execute('''SELECT Track.track_name, Artist.artist_name, Playlist.play_time, Track.youtube_link, Album.album_name, Track.filesystem_link FROM Playlist
JOIN Artist JOIN Track JOIN Album ON
Playlist.track_id = Track.id and Track.artist_id = Artist.id and Track.album_id = Album.id WHERE Playlist.station_id = %s
ORDER BY Playlist.play_time DESC LIMIT %s''',(station_id,num_tracks))
data = self._cur.fetchall()
# The data we will send back
tracks = []
for t in data:
temp = {}
temp['name'] = t[0]
temp['artist'] = t[1]
temp['time'] = t[2]
temp['youtube'] = t[3]
temp['album'] = t[4]
temp['filesystem'] = t[5]
tracks.append(temp)
if num_tracks == 1:
return tracks[0]
else:
return tracks
def get_station_data(self,station=None):
'''
Return a list of dictionaries of the station data
'''
with self._lock:
out_list = []
for s in self._get_all_stations():
id,name,web_address,ignore_artists,ignore_titles,youtube_playlist_id,active = s
if station is not None:
if name != station:
continue
channel_dict = {}
channel_dict['site'] = web_address
exec("channel_dict['ignoreartists'] = "+ ignore_artists)
exec("channel_dict['ignoretitles'] = "+ ignore_titles)
channel_dict['name'] = name
channel_dict['playlist'] = youtube_playlist_id
if (active == 1):
channel_dict['active'] = True
else:
channel_dict['active'] = False
try:
track_data = self.get_latest_station_tracks(name)
channel_dict['lastartist'] = track_data['artist']
channel_dict['lastsong'] = track_data['name']
except IndexError:
channel_dict['lastartist'] = ''
channel_dict['lastsong'] = ''
out_list.append(channel_dict)
if station is not None:
return out_list[0]
else:
return out_list
def look_up_song_youtube(self,artist,album,title):
'''
Given the artist, album, and title,
Look up the song's youtube URL
'''
with self._lock:
self._cur.execute('''SELECT Track.youtube_link from Track JOIN Artist JOIN Album ON
Track.artist_id = Artist.id and Track.album_id = Album.id WHERE Track.track_name = %s and Album.album_name = %s and Artist.artist_name = %s LIMIT 1''',
(title,album,artist))
url = self._cur.fetchone()
# LookupError seems better
if url is None:
raise LookupError
else:
return url[0]
def lookup_station_by_playlist_id(self,playlist_id):
with self._lock:
self._cur.execute('''SELECT * from Station where Station.youtube_playlist_id = %s''',(playlist_id,))
station = self._cur.fetchone()
if station is None:
raise LookupError
else:
id, name, addr, i_a, i_t, pl_id, active = station
station_dict = {}
station_dict['id'] = id
station_dict['name'] = name
station_dict['ignore_artists'] = i_a
station_dict['ignore_titles'] = i_t
station_dict['playlist_id'] = pl_id
station_dict['active'] = active
return station_dict
def __init__(self,user='root',password='password',host='127.0.0.1',initialize=False,config_file=None,connect=True):
# Default to the keyword arguments; an INI-format config file, if
# given, overrides them.
self._user = user
self._password = password
self._host = host
if config_file is not None:
config = ConfigParser()
config.read(config_file)
# Use the values from the file instead of the kwargs
self._user = config['database']['user']
self._password = config['database']['password']
self._host = config['database']['host']
self._conn = mysql.connect(user=self._user,password=self._password,host=self._host)
self._cur = self._conn.cursor()
# Check if the DB exists
try:
self._cur.execute('USE PlaylistDB;')
except mysql.errors.ProgrammingError:
print('The database does not exist. Initializing')
initialize = True
# Lock on our public-facing functions
self._lock = RLock()
if initialize:
self._init_database_schema()
if not connect:
# Then close the connection because they will use "with" statements
self._cur = None
self._conn.close()
self._conn = None
#main()
def __enter__(self):
self._conn = mysql.connect(user=self._user,password=self._password,host=self._host)
self._cur = self._conn.cursor()
self._cur.execute('USE PlaylistDB;')
return self._cur
def __exit__(self,exc_type,exc_value,exc_traceback):
self._cur = None
self._conn.commit()
self._conn.close()
self._conn = None
if __name__ == '__main__':
print('Unit Testing...')
db = PlaylistDatabase(initialize=True)
stations = []
for ii in range(10):
stations.append({
'name':'Station'+str(ii),
'site':'Station'+str(ii)+'.Site',
'ignoreartists':['Station'+str(ii)+'.ignoreartist1','Station'+str(ii)+'.ignoreartist2'],
'ignoretitles':['Station'+str(ii)+'.ignoretitle1','Station'+str(ii)+'.ignoretitle2'],
'playlist':'Station'+str(ii)+'.PlaylistURL'
})
local_station_count = len(stations)
for s in stations:
print('Inserting... ',end='')
id = db.create_station(s['name'],s['site'],s['ignoreartists'],s['ignoretitles'],s['playlist'])
print('Station id:',id)
# Do it again (to make sure we aren't duplicating stations)
for s in stations:
print('Inserting... ',end='')
id = db.create_station(s['name'],s['site'],s['ignoreartists'],s['ignoretitles'],s['playlist'])
print('Station id:',id)
# TODO Make a test for this
assert local_station_count == len(db.get_station_data())
print('Verifying station data... ',end='')
# Make sure our data matches the DB
db_station_count = 0
for s in stations:
ret_station = db.get_station_data(s['name'])
for key in s:
assert s[key] == ret_station[key]
db_station_count+=1
# And make sure we got every station
assert db_station_count == local_station_count
# And make sure there aren't duplicates in the DB
assert local_station_count == len(db.get_station_data())
print('Done.')
# Make some tracks that will be overwritten by the next loop where we add tracks
# to a playlist
sname = stations[0]['name']
tracks = []
print('Making tracks... ')
for ii in range(100):
tracks.append({
'album':'Album'+sname+str(ii),
'artist':'Artist'+sname+str(ii),
'name':'Name'+sname+str(ii),
'date':datetime.datetime.fromtimestamp(ii*1000),
'youtube':'Youtube'+sname+str(ii),
})
for t in tracks:
# Make (or don't) the artist
artist_id = db._make_artist(t['artist'],commit=False)
# Make (or don't) the album
album_id = db._make_album(artist_id,t['album'],commit=False)
# Make (or don't) a track
track_id = db._make_track(t['name'],album_id,artist_id,t['youtube'],commit=False)
#id = db.add_track_to_station_playlist(sname,t['album'],t['artist'],t['name'],t['date'],t['youtube'],commit=False)
#track_id.append(id)
print('Done.')
# Test insertion of songs into playlists
for s in stations:
sname = s['name']
print('Adding tracks to station',sname)
# Make some tracks for this station
tracks = []
for ii in range(10):
tracks.append({
'album':'Album'+sname+str(ii),
'artist':'Artist'+sname+str(ii),
'name':'Name'+sname+str(ii),
'date':datetime.datetime.fromtimestamp(ii*1000),
'youtube':'Youtube'+sname+str(ii),
})
for t in tracks:
id = db.add_track_to_station_playlist(sname,t['artist'],t['album'],t['name'],t['date'],t['youtube'],commit=False)
#track_id.append(id)
db._conn.commit()
# Rebuild the same track data; note this block does not yet assert that the ids line up
for s in stations:
sname = s['name']
# Make some tracks for this station - JUST LIKE ABOVE
tracks = []
for ii in range(100):
tracks.append({
'album':'Album'+sname+str(ii),
'artist':'Artist'+sname+str(ii),
'name':'Name'+sname+str(ii),
'date':datetime.datetime.fromtimestamp(ii*1000),
'youtube':'Youtube'+sname+str(ii),
})
print('All tests passed')
| 36.570435 | 171 | 0.542515 | 2,451 | 21,028 | 4.48062 | 0.145247 | 0.024859 | 0.030596 | 0.014569 | 0.409215 | 0.350483 | 0.310235 | 0.262976 | 0.233473 | 0.221271 | 0 | 0.005254 | 0.357333 | 21,028 | 574 | 172 | 36.634146 | 0.807385 | 0.170535 | 0 | 0.387574 | 0 | 0.011834 | 0.297951 | 0.032466 | 0 | 0 | 0 | 0.001742 | 0.011834 | 1 | 0.047337 | false | 0.017751 | 0.014793 | 0 | 0.112426 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
630aa3b3c2eacc0d01c7ebb0002f73acd362aab2 | 1,907 | py | Python | playground.py | pancetta/python-hpc-performance | fc4c0fcd87d5a0fde78a0d6f284d1c89a31fbb03 | [
"BSD-2-Clause"
] | 1 | 2020-10-29T06:04:43.000Z | 2020-10-29T06:04:43.000Z | playground.py | pancetta/python-performance | fc4c0fcd87d5a0fde78a0d6f284d1c89a31fbb03 | [
"BSD-2-Clause"
] | null | null | null | playground.py | pancetta/python-performance | fc4c0fcd87d5a0fde78a0d6f284d1c89a31fbb03 | [
"BSD-2-Clause"
] | null | null | null | import glob
import json
import numpy as np
import pandas as pd
result_files = glob.glob('data/' + 'results*.json')
# df = pd.read_json('data/results_macbookpro.json')
for file in result_files:
df_full = pd.read_json(file)
df_seq = df_full[df_full['partype'] == 'sequential']
# Why filter by id? Could be that we ran the sequential benchmarks also with multiple cores to test other things.
# Here we take the fastest sequential run, whatever this may be.
idx = df_seq.groupby(['id'])['mean_duration'].transform(min) == df_seq['mean_duration']
df_seq = df_seq[idx]
penalties_seq = df_seq['timeline'].apply(np.asarray) \
.apply(lambda x: x[x > 0][2:-2]) \
.apply(lambda x: 1 - min(x.std() / x.mean(), 1) if len(x) > 0 else 1)
scores_seq = 1.0 / df_seq['mean_duration'] * penalties_seq
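# Editor's worked example: a timeline with mean 1.0 s and std 0.1 s gives
# penalty 1 - min(0.1 / 1.0, 1) = 0.9, so the score is 0.9 / mean_duration;
# a perfectly flat timeline keeps the full 1 / mean_duration.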
df_mt = df_full[df_full['partype'] == 'multithreaded']
idx = df_mt.groupby(['id'])['mean_duration'].transform(min) == df_mt['mean_duration']
df_mt = df_mt[idx]
penalties_mt = df_mt['timeline'].apply(np.asarray) \
.apply(lambda x: x[x > 0][2:-2]) \
.apply(lambda x: 1 - min(x.std() / x.mean(), 1) if len(x) > 0 else 1)
scores_mt = 1.0 / df_mt['mean_duration'] * penalties_mt
df_par = df_full[df_full['partype'] == 'mpi']
idx = df_par.groupby(['id'])['mean_duration'].transform(max) == df_par['mean_duration']
df_par = df_par[idx]
penalties_par = df_par['timeline'].apply(np.asarray)\
.apply(lambda x: x[x > 0][2:-2])\
.apply(lambda x: 1 - min(x.std() / x.mean(), 1) if len(x) > 0 else 1)
# Why multiply by MPI_size? Doesn't matter for scaling tests, but rewards stress tests with more cores.
scores_par = df_par['MPI_size'] / df_par['mean_duration'] * penalties_par
print(file, np.median(scores_seq), np.median(scores_mt), np.median(scores_par))
exit()
| 41.456522 | 117 | 0.651285 | 313 | 1,907 | 3.785942 | 0.28754 | 0.091139 | 0.060759 | 0.03038 | 0.337553 | 0.264135 | 0.264135 | 0.205063 | 0.205063 | 0.205063 | 0 | 0.01615 | 0.188254 | 1,907 | 45 | 118 | 42.377778 | 0.749354 | 0.170425 | 0 | 0.193548 | 0 | 0 | 0.139683 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16129 | 0 | 0.16129 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
630ac76cbe8ac21aa5e3d1eae8f88cc28e5683db | 3,129 | py | Python | phypidaq/GDK101Config.py | uadlq/PhyPiDAQ-PiOS11 | fc6060551be2cc0143a157081341bf3c338d9fbd | [
"BSD-2-Clause"
] | null | null | null | phypidaq/GDK101Config.py | uadlq/PhyPiDAQ-PiOS11 | fc6060551be2cc0143a157081341bf3c338d9fbd | [
"BSD-2-Clause"
] | null | null | null | phypidaq/GDK101Config.py | uadlq/PhyPiDAQ-PiOS11 | fc6060551be2cc0143a157081341bf3c338d9fbd | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
from __future__ import absolute_import
"""
interface for FTLAB GDK 101 gamma radiation detector
attention: I²C interface needs level shifter 5.0 <-> 3.3 V
"""
import sys
from smbus2 import SMBus
# default addresses
I2CADDR = 0x18
# code of driver classes included below
class GDK101Config(object):
"""interface for GDK101 gamma detector
1st channel: 1 min average
2nd channel: 10 min sliding average
"""
def __init__(self, confdict=None):
self.I2CADDR = I2CADDR
if confdict is None:
confdict = {}
if 'I2CADDR' in confdict:
self.I2CADDR = confdict['I2CADDR']
print("GDK101: I2C address set to %x " % self.I2CADDR)
if 'NChannels' in confdict:
self.NChannels = confdict["NChannels"]
else:
self.NChannels = 1
self.ChanLims = [[0, 200.], [0., 200.]]
self.ChanNams = ['D', 'D']
self.ChanUnits = ['µSv', 'µSv']
def init(self):
"""init sensor"""
try:
busnum = 1
self.sensor = GDK101(busnum, self.I2CADDR)
print("GDK101: sensor found, firmware version ", self.sensor.version())
except Exception as e:
print("GDK101: Error setting up device - exit")
print(str(e))
sys.exit(1)
def acquireData(self, buf):
"""read data from sensor"""
buf[0] = self.sensor.read1()
if self.NChannels > 1:
buf[1] = self.sensor.read10()
def closeDevice(self):
self.sensor.close()
# ----- driver section -----------
# GDK101 has a very simple I²C interface
# list of valid commands
CMD_reset = 0xA0 # reset
CMD_status = 0xB0 # reset
CMD_firmware = 0xB4 # read firmware
CMD_measuringTime = 0xB1 # read measurement time
CMD_readDose10 = 0xB2 # 10 min average, 1 min update
CMD_readDose1 = 0xB3 # 1 min average
class GDK101(object):
"""driver code for GDK101 gamma ray sensor"""
def __init__(self, busnum, addr):
self.bus = SMBus(busnum)
self.addr = addr
rc = self.reset()
if rc != 1: # reset failed
raise Exception('GKD101: failed to reset sensor')
def _readGKD101(self, cmd):
"""implement simple I²C interface of GDK101
- send command
- block-read two bytes
"""
self.bus.write_byte_data(self.addr, 0, cmd)
return self.bus.read_i2c_block_data(self.addr, 0, 2)
def reset(self):
d = self._readGKD101(CMD_reset)
return d[0]
def read1(self):
""" read 1 min average"""
d = self._readGKD101(CMD_readDose1)
return d[0] + d[1] / 100.
def read10(self):
""" read 10 min sliding average"""
d = self._readGKD101(CMD_readDose10)
return d[0] + d[1] / 100.
def version(self):
"""return firmware version"""
fw = self._readGKD101(CMD_firmware)
return str(fw[0] + fw[1] / 10.)
def close(self):
"""close bus"""
self.bus.close()
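if __name__ == '__main__':
    # Editor's sketch (assumes a GDK101 wired to I2C bus 1 at the default
    # address; not part of the original driver).
    gdk = GDK101(1, I2CADDR)
    print('firmware:', gdk.version())
    print('1 min average dose:', gdk.read1())
    gdk.close()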
| 27.690265 | 83 | 0.588686 | 388 | 3,129 | 4.646907 | 0.365979 | 0.027732 | 0.037715 | 0.02995 | 0.04548 | 0.017748 | 0.017748 | 0 | 0 | 0 | 0 | 0.064458 | 0.295941 | 3,129 | 112 | 84 | 27.9375 | 0.753972 | 0.193992 | 0 | 0.030769 | 0 | 0 | 0.07736 | 0 | 0 | 0 | 0.012238 | 0 | 0 | 1 | 0.169231 | false | 0 | 0.061538 | 0 | 0.338462 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
630d292027aeed5562b84da22882b34d40a2510c | 1,179 | py | Python | main.py | m1stadev/infra-api | 3137e2ef70ccdec914557c2a1222feeb26026bd8 | [
"MIT"
] | 1 | 2022-03-19T18:27:45.000Z | 2022-03-19T18:27:45.000Z | main.py | m1stadev/infra-api | 3137e2ef70ccdec914557c2a1222feeb26026bd8 | [
"MIT"
] | null | null | null | main.py | m1stadev/infra-api | 3137e2ef70ccdec914557c2a1222feeb26026bd8 | [
"MIT"
] | null | null | null | from fastapi import BackgroundTasks, FastAPI, HTTPException
from typing import Optional
from utils.client import HeaterClient
from utils import errors, types
app = FastAPI(root_path='/heater')
api = HeaterClient()
async def _set_temp_limit(temp: types.TempData) -> None:
    await api.set_temp_limit(temp.temp)
@app.get('/actions/power')
async def toggle_power() -> Optional[dict]:
try:
await api.toggle_power()
return {'status': 'ok'}
except errors.HeaterError as e:
raise HTTPException(status_code=500, detail=str(e)) from e
@app.get('/actions/heat')
async def toggle_heat() -> Optional[dict]:
try:
await api.toggle_heat()
return {'status': 'ok'}
except errors.HeaterError as e:
raise HTTPException(status_code=500, detail=str(e)) from e
@app.post('/set/limit')
async def set_temp_limit(task: BackgroundTasks, temp: types.TempData) -> Optional[dict]:
if temp.temp == api.status['temp']:
return {'status': 'ok'}
if api.limit_running:
raise HTTPException(status_code=429, detail='Already setting a temperature limit.')
task.add_task(_set_temp_limit, temp)
return {'status': 'ok'}
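if __name__ == '__main__':
    # Editor's sketch (not part of the original service): exercise the routes
    # in-process with FastAPI's test client. Assumes the 'temp' field of
    # types.TempData, as used by set_temp_limit() above; the handlers still
    # call the real HeaterClient, so expect errors without the device.
    from fastapi.testclient import TestClient
    client = TestClient(app)
    print(client.get('/actions/power').json())
    print(client.post('/set/limit', json={'temp': 21.5}).json())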
| 31.864865 | 92 | 0.695505 | 159 | 1,179 | 5.031447 | 0.339623 | 0.04 | 0.06 | 0.06 | 0.345 | 0.295 | 0.2225 | 0.2225 | 0.2225 | 0.2225 | 0 | 0.009269 | 0.176421 | 1,179 | 36 | 93 | 32.75 | 0.814624 | 0 | 0 | 0.344828 | 0 | 0 | 0.098388 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.137931 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
630f630380f0e429817ce079630c5a5803526011 | 19,389 | py | Python | ebu_tt_live/bindings/validation/timing.py | bbc/ebu-tt-live-toolkit | 2d0d6e655f83c29453220abf59c213b4c2a9fc02 | [
"BSD-3-Clause"
] | 1 | 2016-05-26T13:42:37.000Z | 2016-05-26T13:42:37.000Z | ebu_tt_live/bindings/validation/timing.py | bbc/ebu-tt-live-toolkit | 2d0d6e655f83c29453220abf59c213b4c2a9fc02 | [
"BSD-3-Clause"
] | 43 | 2016-04-20T14:36:06.000Z | 2021-11-29T11:22:40.000Z | ebu_tt_live/bindings/validation/timing.py | bbc/ebu-tt-live-toolkit | 2d0d6e655f83c29453220abf59c213b4c2a9fc02 | [
"BSD-3-Clause"
] | 5 | 2016-04-28T10:21:29.000Z | 2020-10-12T18:20:58.000Z | from datetime import timedelta
from ebu_tt_live.bindings import get_xml_parsing_context
from ebu_tt_live.errors import LogicError, SemanticValidationError, OutsideSegmentError, OverlappingActiveElementsError
from ebu_tt_live.strings import ERR_SEMANTIC_VALIDATION_TIMING_TYPE
import itertools
class TimingValidationMixin(object):
"""
This mixin is meant to be applied to timed elements (body, div, p, span) and provides parser hooks for timing
attributes as well as a generic semantic validation for timing attributes in the document's timeBase.
"""
_computed_begin_time = None
_computed_end_time = None
@property
def computed_begin_time(self):
return self._computed_begin_time
@property
def computed_end_time(self):
return self._computed_end_time
def _pre_timing_set_attribute(self, attr_en, attr_use):
# Pass in the timing_attribute_name to the context to help the timing type constructor refuse creation
context = get_xml_parsing_context()
if context is not None:
# This means we are in XML parsing mode
context['timing_attribute_name'] = attr_en.localName()
def _post_timing_set_attribute(self, attr_use):
context = get_xml_parsing_context()
if context is not None:
# Clean up after successful creation
context.pop('timing_attribute_name', None)
def _pre_init_variables(self, dataset, element_content):
self._begin_timedelta = self.begin and self.begin.timedelta or None
self._end_timedelta = self.end and self.end.timedelta or None
# We make sure end time is always none at the beginning because it can cause a LogicError with a stale value
self._computed_begin_time = None
self._computed_end_time = None
self._semantic_dataset = dataset
def _element_badly_timed(self, value, element):
return (element.begin is not None and \
element.end is not None and \
element.end <= element.begin)
def _post_cleanup_variables(self):
del self._semantic_dataset
del self._begin_timedelta
del self._end_timedelta
def _pre_assign_end(self, proposed_end):
self._semantic_dataset['timing_end_stack'].append(proposed_end)
self._computed_end_time = proposed_end
def _pre_calculate_end(self):
if self._end_timedelta is not None:
if self._semantic_dataset['timing_end_stack']:
# If there was already an end time in some parent element.
proposed_end = min(self._semantic_dataset['timing_syncbase'] + self._end_timedelta, self._semantic_dataset['timing_end_stack'][-1])
# New end
else:
proposed_end = self._semantic_dataset['timing_syncbase'] + self._end_timedelta
# If we have it assign it
self._pre_assign_end(proposed_end)
def _pre_assign_begin(self, proposed_begin):
if proposed_begin is not None:
# Store the element's activation begin times
# Let's push it onto the stack.
self._semantic_dataset['timing_begin_stack'].append(proposed_begin)
self._semantic_dataset['timing_syncbase'] += proposed_begin
# If we have a non-zero availability time we need to factor it in BUT the syncbase stays
# this checks if it is a ttd doc
if 'ttd_element' in self._semantic_dataset:
self._computed_begin_time = self._semantic_dataset['timing_syncbase']
else:
#assuming that this is a live document
if self._semantic_dataset['availability_time']:
self._computed_begin_time = max(self._semantic_dataset['timing_syncbase'],
self._semantic_dataset['availability_time'])
else:
self._computed_begin_time = self._semantic_dataset['timing_syncbase']
def _pre_calculate_begin(self):
self._pre_assign_begin(self._begin_timedelta)
def _post_calculate_begin(self, children):
"""
The computed begin time shall be moved down to match that of the earliest child begin time in case the container
does not specify a begin time itself. NOTE: This does not modify the syncbase.
:param children:
:return:
"""
if not children:
return
children_computed_begin_times = [item.computed_begin_time for item in children]
earliest_child_computed_begin = min(children_computed_begin_times)
if earliest_child_computed_begin > self._computed_begin_time:
# Adjustment scenario
# If no parent element specified a begin time, then we have found
# a case for the "earliest specified computed begin time" as per the
# specification and we can adjust the begin time to match the
# children's begin time.
if len(self._semantic_dataset['timing_begin_stack']) == 0:
self._computed_begin_time = earliest_child_computed_begin
def _semantic_preprocess_timing(self, dataset, element_content):
"""
As the validator traverses in a Depth First Search this is the hook function to call on the way DOWN.
Steps to take:
- Initialize temporary variables
- Calculate end timing if element defines an end time
- Calculate begin time and syncbase for children
:param dataset: Semantic dataset from semantic validation framework
:param element_content: PyXB's binding placeholder for this binding instance
"""
self._pre_init_variables(dataset, element_content)
self._pre_calculate_end()
# These assignments must happen last otherwise the syncbase will be wrong
# in calculations happening after syncbase adjustment.
self._pre_calculate_begin()
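# Editor's worked example (illustrative, ignoring availability time): with
# syncbase 0s, a div with begin="2s" containing a p with begin="1s" end="5s"
# resolves as follows -- the div pushes begin 2s (syncbase becomes 2s); for
# the p, end is computed first against the inherited syncbase (2s + 5s = 7s),
# then its own begin is added, giving a computed begin of 3s.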
def _post_pop_begin(self):
begin_timedelta = None
if self._begin_timedelta is not None:
# We pushed on the stack it is time to pop it. It could probably be removed
# and replaced with self._begin_timedelta
begin_timedelta = self._semantic_dataset['timing_begin_stack'].pop()
self._semantic_dataset['timing_syncbase'] -= begin_timedelta
return begin_timedelta
def _post_pop_end(self):
end_timedelta = None
if self._end_timedelta is not None:
# We pushed on the stack it is time to pop it
end_timedelta = self._semantic_dataset['timing_end_stack'].pop()
return end_timedelta
def _semantic_postprocess_timing(self, dataset, element_content):
"""
As the validator traverses in a Depth First Search this is the hook function to call on the way UP
Steps to take:
- Fill in end times if element doesn't define end time
- Try using computed_end_time information from its children
- If no children are found look at parents end time constraints.
- Finalize computed_begin_time if begin is not specified using computed_begin_time of children.
:param dataset: Semantic dataset from semantic validation framework
:param element_content: PyXB's binding placeholder for this binding instance
"""
begin_timedelta = self._post_pop_begin()
# This end timedelta is an absolute calculated value on the timeline. Not relative.
end_timedelta = self._post_pop_end()
# If the forward running part of the traversal could not assign an end time we can use the backwards route
# which is in a way similar to dynamic programming because we take the children computed times and take the
# maximum value from them. SPECIAL case: a single leaf element with an undefined end time renders the entire
# branch undefined
if end_timedelta is not None and self.computed_end_time is None \
or end_timedelta is None and self.computed_end_time is not None:
# This is just a simple sanity check. It should never be triggered.
# Should the calculation be changed this filters out an obvious source of error.
raise LogicError()
children = None
if self.computed_end_time is None:
# This requires calculation based on the timings in its children.
# All timing containers are complexTypes so we can call orderedContent safely
# but we don't want to bother with explicitly badly timed elements so filter
# them out.
children = [item for item in [x.value for x in self.orderedContent()] if isinstance(item, TimingValidationMixin) \
and not item._element_badly_timed(value=None, element=item)]
# Order of statements is important
if not children:
# This means we are in a timing container leaf.
if not self._semantic_dataset['timing_end_stack']:
# Here we go an endless document. Pointless but for clarity's sake assign it explicitly to None.
self._computed_end_time = None
else:
# Try to assign it the last specified ancestor
self._computed_end_time = self._semantic_dataset['timing_end_stack'][-1]
else:
children_computed_end_times = [item.computed_end_time for item in children]
if None in children_computed_end_times:
# The endless document case propagates up
self._computed_end_time = None
else:
# Propagate the longest end time among the children
self._computed_end_time = max(children_computed_end_times)
# When we are the body element we need to check that our explicit timings
# are valid, i.e. deal with end before begin by discarding this element
# from computed time calculation as per spec requirement. Since we exclude all
# elements where this is the case using the _element_badly_timed
# function as a filter, this only applies to the body element (on which
# the filter function doesn't get called).
if isinstance(self, BodyTimingValidationMixin) \
and self._element_badly_timed(value=None, element=self):
self._computed_end_time = None
self._computed_begin_time = timedelta(0)
if begin_timedelta is None:
if children is None:
children = [item for item in [x.value for x in self.orderedContent()] if isinstance(item, TimingValidationMixin) \
and not item._element_badly_timed(value=None, element=item)]
self._post_calculate_begin(children=children)
self._post_cleanup_variables()
# The mixin approach is used since there are multiple timed element types.
# The inspected values are all attributes of the element so they do not
# take part in the traversal directly we process them in the timed element's context instead: body, div, p, span
def _semantic_timebase_validation(self, dataset, element_content):
if 'tt_element' in dataset:
time_base = dataset['tt_element'].timeBase
else:
time_base = dataset['ttd_element'].timeBase
if self.begin is not None:
if hasattr(self.begin, 'compatible_timebases'):
# Check typing of begin attribute against the timebase
timebases = self.begin.compatible_timebases()
if time_base not in timebases['begin']:
raise SemanticValidationError(
ERR_SEMANTIC_VALIDATION_TIMING_TYPE.format(
attr_type=type(self.begin),
attr_value=self.begin,
attr_name='begin',
time_base=time_base
)
)
if self.end is not None:
if hasattr(self.end, 'compatible_timebases'):
# Check typing of end attribute against the timebase
timebases = self.end.compatible_timebases()
if time_base not in timebases['end']:
raise SemanticValidationError(
ERR_SEMANTIC_VALIDATION_TIMING_TYPE.format(
attr_type=type(self.end),
attr_value=self.end,
attr_name='end',
time_base=time_base
)
)
def _semantic_manage_timeline(self, dataset, element_content):
# Get the document instance
doc = dataset['document']
# Register on timeline
doc.add_to_timeline(self)
# This section covers the copying operations of timed containers.
# This semantic validation only applies to ebu-tt-d type elements, where the origin and extent units are in %
def _semantic_validate_ttd_active_areas(self, dataset):
# Get the document instance
doc = dataset['document']
if self.computed_begin_time is not None and self.computed_end_time is not None:
affected_elements = doc.lookup_range_on_timeline(self.computed_begin_time, self.computed_end_time)
if len(affected_elements) > 1:
for elem1, elem2 in itertools.combinations(affected_elements, 2):
if elem1 != elem2:
# we only care if the elements both have regions
if elem1.region is not None and elem2.region is not None \
and elem1.region != elem2.region:
# Getting coordinates from the attribute eg ['14% 16%']
elem1_region = dataset['elements_by_id'][elem1.region]
elem2_region = dataset['elements_by_id'][elem2.region]
if elem1_region.overlaps(elem2_region):
raise OverlappingActiveElementsError(
self, elem1_region, elem2_region,
elem1.id, elem2.id)
def is_in_segment(self, begin=None, end=None):
if begin is not None:
if self.computed_end_time is not None and self.computed_end_time <= begin:
return False
if end is not None:
if self.computed_begin_time >= end:
return False
return True
def _assert_in_segment(self, dataset, element_content=None):
if not self.is_in_segment(
begin=dataset['segment_begin'],
end=dataset['segment_end']
):
raise OutsideSegmentError()
def is_timed_leaf(self):
return False
def _semantic_copy_apply_leaf_timing(self, copied_instance, dataset, element_content=None):
if not copied_instance.is_timed_leaf():
copied_instance.begin = None
copied_instance.end = None
if hasattr(copied_instance, 'dur'):
copied_instance.dur = None
else:
tt_elem = dataset['tt_element']
trimmed_begin = self.computed_begin_time
trimmed_end = self.computed_end_time
segment_begin = dataset['segment_begin']
segment_end = dataset['segment_end']
if segment_begin is not None:
if segment_begin > trimmed_begin:
trimmed_begin = segment_begin
if segment_end is not None:
if trimmed_end is None or trimmed_end > segment_end:
trimmed_end = segment_end
# Create compatible timing types
copied_instance.begin = tt_elem.get_timing_type(trimmed_begin)
copied_instance.end = tt_elem.get_timing_type(trimmed_end)
class BodyTimingValidationMixin(TimingValidationMixin):
"""
The body element seems to be exception from too many rules and makes one common validator pretty difficult
to manage. This subclass is meant to call all the extensions/limitations for the body element that does not apply
to timed containers in general in the EBU-TT-Live spec.
"""
def _pre_init_variables(self, dataset, element_content):
super(BodyTimingValidationMixin, self)._pre_init_variables(dataset, element_content)
self._dur_timedelta = self.dur and self.dur.timedelta or None
def _post_cleanup_variables(self):
del self._dur_timedelta
super(BodyTimingValidationMixin, self)._post_cleanup_variables()
def _pre_calculate_end(self):
# This is all for the body element because of the dur attribute
if self._dur_timedelta is not None:
if self._begin_timedelta is not None and self._end_timedelta is not None:
# This is a special (stupid) edge case:
proposed_end = min(self._dur_timedelta + self._begin_timedelta, self._end_timedelta)
elif self._begin_timedelta is not None and self._end_timedelta is None:
proposed_end = self._dur_timedelta + self._begin_timedelta
elif self._begin_timedelta is None and self._end_timedelta is None:
# In this case the document end at availability time + dur
proposed_end = self._semantic_dataset['availability_time'] + self._dur_timedelta
elif self._begin_timedelta is None and self._end_timedelta is not None:
proposed_end = min(self._semantic_dataset['availability_time'] + self._dur_timedelta, self._end_timedelta)
else:
# Fallback case if there is no duration specified the same as the other containers
super(BodyTimingValidationMixin, self)._pre_calculate_end()
# WARNING this assigns it so we are done
return
# If one of our special ifs worked let's assign the value here.
self._pre_assign_end(proposed_end)
def _pre_calculate_begin(self):
self._pre_assign_begin(self._begin_timedelta)
def _post_pop_end(self):
end_timedelta = None
if self._end_timedelta is not None or self._dur_timedelta is not None:
# We pushed on the stack it is time to pop it
end_timedelta = self._semantic_dataset['timing_end_stack'].pop()
return end_timedelta
def _semantic_timebase_validation(self, dataset, element_content):
super(BodyTimingValidationMixin, self)._semantic_timebase_validation(dataset, element_content)
time_base = dataset['tt_element'].timeBase
if self.dur is not None:
if hasattr(self.dur, 'compatible_timebases'):
# Check typing of dur attribute against the timebase
timebases = self.dur.compatible_timebases()
if time_base not in timebases['dur']:
raise SemanticValidationError(
ERR_SEMANTIC_VALIDATION_TIMING_TYPE.format(
attr_type=type(self.dur),
attr_value=self.dur,
attr_name='dur',
time_base=time_base
)
)
| 46.720482 | 147 | 0.645933 | 2,398 | 19,389 | 4.981234 | 0.166389 | 0.012558 | 0.02185 | 0.03558 | 0.42118 | 0.350021 | 0.263625 | 0.235998 | 0.175471 | 0.154123 | 0 | 0.002061 | 0.299293 | 19,389 | 414 | 148 | 46.833333 | 0.877153 | 0.280468 | 0 | 0.2875 | 0 | 0 | 0.045435 | 0.003078 | 0 | 0 | 0 | 0 | 0.004167 | 1 | 0.120833 | false | 0 | 0.020833 | 0.016667 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
630f948b220212f854095d5abf28773b052a91d7 | 3,243 | py | Python | cnn_models/vggmodel.py | Zuowei-ZHANG/DCNN | 5bd245ff5d19115c5c6b001e1664d26ba61058e9 | [
"MIT"
] | null | null | null | cnn_models/vggmodel.py | Zuowei-ZHANG/DCNN | 5bd245ff5d19115c5c6b001e1664d26ba61058e9 | [
"MIT"
] | null | null | null | cnn_models/vggmodel.py | Zuowei-ZHANG/DCNN | 5bd245ff5d19115c5c6b001e1664d26ba61058e9 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch
class VGG(nn.Module):
def __init__(self, features, num_classes=5, init_weights=False):
super(VGG, self).__init__()
self.features = features
# spatial size of the 512-channel feature map after features():
# 32x32 input -> 1, 128x128 -> 4, 224x224 -> 7
self.classifier1 = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(512*1*1, 4096), #32*32
nn.ReLU(True),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Linear(4096, num_classes)
)
self.classifier4 = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(512*4*4, 4096), #128*128
nn.ReLU(True),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Linear(4096, num_classes)
)
self.classifier7 = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(512*7*7, 4096), #224*224
nn.ReLU(True),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Linear(4096, num_classes)
)
if init_weights:
self._initialize_weights()
def forward(self, x):
# N x 3 x 224 x 224
if torch.cuda.is_available():
x = self.features(x.type(torch.cuda.FloatTensor))
else:
x = self.features(x)
size=list(x.size())[-1]
# N x 512 x size x size
x = torch.flatten(x, start_dim=1)
# N x 512*size*size
if size==1:
x = self.classifier1(x)
if size==4:
x = self.classifier4(x)
if size==7:
x = self.classifier7(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
#nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
# nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_features(cfg: list):
layers = []
in_channels = 3
for v in cfg:
if v == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg(model_name="VGG16", **kwargs):
try:
cfg = cfgs[model_name]
    except KeyError:
        print("Warning: model name {} not in cfgs dict!".format(model_name))
        exit(-1)
model = VGG(make_features(cfg), **kwargs)
return model
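# Minimal usage sketch (hedged; an input of size 224 gives a 7x7 feature map,
# so classifier7 is selected; all values here are illustrative):
#   model = vgg(model_name="VGG16", num_classes=5, init_weights=True)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 5)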
| 30.885714 | 117 | 0.505088 | 448 | 3,243 | 3.566964 | 0.227679 | 0.052566 | 0.035044 | 0.041302 | 0.354193 | 0.353567 | 0.293492 | 0.264706 | 0.2597 | 0.233417 | 0 | 0.134768 | 0.341042 | 3,243 | 104 | 118 | 31.182692 | 0.613009 | 0.050262 | 0 | 0.26506 | 0 | 0 | 0.032248 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060241 | false | 0 | 0.024096 | 0 | 0.13253 | 0.012048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6310154c0b62aabff8333a1130ec57c94496955d | 16,656 | py | Python | minemeld/flask/feedredis.py | hasadi-ha/minemeld-core | eb135597ce895b78f4c2ec272ffc8a45a12962bd | [
"Apache-2.0"
] | null | null | null | minemeld/flask/feedredis.py | hasadi-ha/minemeld-core | eb135597ce895b78f4c2ec272ffc8a45a12962bd | [
"Apache-2.0"
] | null | null | null | minemeld/flask/feedredis.py | hasadi-ha/minemeld-core | eb135597ce895b78f4c2ec272ffc8a45a12962bd | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cStringIO
import json
import re
from collections import defaultdict
from contextlib import contextmanager
import unicodecsv
from flask import request, jsonify, Response, stream_with_context
from flask.ext.login import current_user
from gevent import sleep
from netaddr import IPRange, IPNetwork, IPSet, AddrFormatError
from .aaa import MMBlueprint
from .cbfeed import CbFeedInfo, CbReport
from .logger import LOG
from .mmrpc import MMMaster
from .redisclient import SR
__all__ = ['BLUEPRINT']
FEED_INTERVAL = 100
_PROTOCOL_RE = re.compile('^(?:[a-z]+:)*//')
_INVALID_TOKEN_RE = re.compile(r'(?:[^\./+=\?&]+\*[^\./+=\?&]*)|(?:[^\./+=\?&]*\*[^\./+=\?&]+)')
_IPV4_MASK_RE = re.compile(r'^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}(\/[0-9]+)?$')
_IPV4_RANGE_RE = re.compile(
    r'^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}-[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$')
BLUEPRINT = MMBlueprint('feeds', __name__, url_prefix='/feeds')
def _translate_ip_ranges(indicator, value=None):
if value is not None and value['type'] != 'IPv4':
return [indicator]
try:
ip_range = IPRange(*indicator.split('-', 1))
except (AddrFormatError, ValueError, TypeError):
return [indicator]
return [str(x) if x.size != 1 else str(x.network) for x in ip_range.cidrs()]
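# Example (verified against the logic above): the IPv4 range indicator
# '10.0.0.0-10.0.0.3' is rewritten to its covering CIDR blocks, here
# ['10.0.0.0/30']; single addresses and non-IPv4 indicators pass through.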
@contextmanager
def _buffer():
result = cStringIO.StringIO()
try:
yield result
finally:
result.close()
def generate_panosurl_feed(feed, start, num, desc, value, **kwargs):
zrange = SR.zrange
if desc:
zrange = SR.zrevrange
if num is None:
num = (1 << 32) - 1
cstart = start
while cstart < (start + num):
ilist = zrange(feed, cstart,
cstart - 1 + min(start + num - cstart, FEED_INTERVAL))
for i in ilist:
i = i.lower()
i = _PROTOCOL_RE.sub('', i)
i = _INVALID_TOKEN_RE.sub('*', i)
yield i + '\n'
        if len(ilist) < FEED_INTERVAL:
            break
        cstart += FEED_INTERVAL
def generate_plain_feed(feed, start, num, desc, value, **kwargs):
zrange = SR.zrange
if desc:
zrange = SR.zrevrange
if num is None:
num = (1 << 32) - 1
translate_ip_ranges = kwargs.pop('translate_ip_ranges', False)
cstart = start
while cstart < (start + num):
ilist = zrange(feed, cstart,
cstart - 1 + min(start + num - cstart, FEED_INTERVAL))
if translate_ip_ranges:
ilist = [xi for i in ilist for xi in _translate_ip_ranges(i)]
yield '\n'.join(ilist) + '\n'
        if len(ilist) < FEED_INTERVAL:
            break
        cstart += FEED_INTERVAL
def generate_json_feed(feed, start, num, desc, value, **kwargs):
zrange = SR.zrange
if desc:
zrange = SR.zrevrange
if num is None:
num = (1 << 32) - 1
translate_ip_ranges = kwargs.pop('translate_ip_ranges', False)
if value == 'json':
yield '[\n'
cstart = start
firstelement = True
while cstart < (start + num):
ilist = zrange(feed, cstart,
cstart - 1 + min(start + num - cstart, FEED_INTERVAL))
result = cStringIO.StringIO()
for indicator in ilist:
v = SR.hget(feed + '.value', indicator)
xindicators = [indicator]
if translate_ip_ranges and '-' in indicator:
xindicators = _translate_ip_ranges(indicator, None if v is None else json.loads(v))
if v is None:
v = 'null'
for i in xindicators:
if value == 'json' and not firstelement:
result.write(',\n')
if value == 'json-seq':
result.write('\x1E')
result.write('{"indicator":"')
result.write(i)
result.write('","value":')
result.write(v)
result.write('}')
if value == 'json-seq':
result.write('\n')
firstelement = False
yield result.getvalue()
result.close()
        if len(ilist) < FEED_INTERVAL:
            break
        cstart += FEED_INTERVAL
if value == 'json':
yield ']\n'
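# Note: the json-seq variant frames each record with an ASCII RS (0x1E)
# prefix and a trailing newline, matching RFC 7464 JSON text sequences.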
def generate_csv_feed(feed, start, num, desc, value, **kwargs):
def _is_atomic_type(fv):
        return isinstance(fv, (unicode, str, int, bool))
def _format_field_value(fv):
if _is_atomic_type(fv):
return fv
if isinstance(fv, list):
ok = True
for fve in fv:
ok &= _is_atomic_type(fve)
if ok:
return ','.join(fv)
return json.dumps(fv)
zrange = SR.zrange
if desc:
zrange = SR.zrevrange
if num is None:
num = (1 << 32) - 1
translate_ip_ranges = kwargs.pop('translate_ip_ranges', False)
# extract name of fields and column names
columns = []
fields = []
for addf in kwargs.pop('f', []):
if '|' in addf:
fname, cname = addf.rsplit('|', 1)
else:
fname = addf
cname = addf
columns.append(cname)
fields.append(fname)
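    # e.g. f=sources|Sources emits the field 'sources' under the CSV header
    # 'Sources'; a bare f=confidence uses the same name for both.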
# if no fields are specified, only indicator is generated
if len(fields) == 0:
fields = ['indicator']
columns = ['indicator']
# check if header should be generated
header = kwargs.pop('h', None)
if header is None:
header = True
else:
header = int(header[0])
# check if bom should be generated
ubom = kwargs.pop('ubom', None)
if ubom is None:
ubom = False
else:
ubom = int(ubom[0])
cstart = start
if ubom:
LOG.debug('BOM')
yield '\xef\xbb\xbf'
with _buffer() as current_line:
w = unicodecsv.DictWriter(
current_line,
fieldnames=columns,
encoding='utf-8'
)
if header:
w.writeheader()
yield current_line.getvalue()
while cstart < (start + num):
ilist = zrange(feed, cstart,
cstart - 1 + min(start + num - cstart, FEED_INTERVAL))
for indicator in ilist:
v = SR.hget(feed + '.value', indicator)
v = None if v is None else json.loads(v)
xindicators = [indicator]
if translate_ip_ranges and '-' in indicator:
xindicators = _translate_ip_ranges(indicator, v)
for i in xindicators:
fieldvalues = {}
for f, c in zip(fields, columns):
if f == 'indicator':
fieldvalues[c] = i
continue
if v is not None and f in v:
fieldvalues[c] = _format_field_value(v[f])
current_line.truncate(0)
w.writerow(fieldvalues)
yield current_line.getvalue()
if len(ilist) < FEED_INTERVAL:
break
cstart += FEED_INTERVAL
def generate_mwg_feed(feed, start, num, desc, value, **kwargs):
zrange = SR.zrange
if desc:
zrange = SR.zrevrange
if num is None:
num = (1 << 32) - 1
translate_ip_ranges = kwargs.pop('translate_ip_ranges', False)
type_ = kwargs.get('t', None)
if type_ is None:
type_ = 'string'
else:
type_ = type_[0]
translate_ip_ranges |= type_ == 'ip'
yield 'type={}\n'.format(type_)
cstart = start
while cstart < (start + num):
ilist = zrange(feed, cstart,
cstart - 1 + min(start + num - cstart, FEED_INTERVAL))
for indicator in ilist:
v = SR.hget(feed + '.value', indicator)
v = None if v is None else json.loads(v)
xindicators = [indicator]
if translate_ip_ranges and '-' in indicator:
xindicators = _translate_ip_ranges(indicator, v)
sources = 'from minemeld'
if v is not None:
sources = v.get('sources', 'from minemeld')
if isinstance(sources, list):
sources = ','.join(sources)
for i in xindicators:
yield '"{}" "{}"\n'.format(
i.replace('"', '\\"'),
sources.replace('"', '\\"')
)
        if len(ilist) < FEED_INTERVAL:
            break
        cstart += FEED_INTERVAL
# This formatter implements BlueCoat custom URL format as described at
# https://www.bluecoat.com/documents/download/a366dc73-d455-4859-b92a-c96bd034cb4c/f849f1e3-a906-4ee8-924e-a2061dfe3cdf
# It expects the value 'bc_category' in the indicator. The value can be either a single string or a list of strings.
# Optional feed arguments:
# ca : Indicator's attribute that hosts the BlueCoat category. Defaults to 'bc_category'
# cd : Default BlueCoat category for indicators that do not have 'catattr'. This argument can appear multiple
# times and it will be handled as a list of categories the indicator belongs to. If not present then
# indicators without 'catattr' will be discarded.
def generate_bluecoat_feed(feed, start, num, desc, value, **kwargs):
zrange = SR.zrange
ilist = zrange(feed, 0, (1 << 32) - 1)
bc_dict = defaultdict(list)
flag_category_default = kwargs.get('cd', None)
flag_category_attr = kwargs.get('ca', ['bc_category'])[0]
for i in ilist:
sleep(0)
v = SR.hget(feed + '.value', i)
v = None if v is None else json.loads(v)
i = i.lower()
i = _PROTOCOL_RE.sub('', i)
i = _INVALID_TOKEN_RE.sub('*', i)
if v is None:
if flag_category_default is None:
continue
else:
bc_cat_list = flag_category_default
else:
bc_cat_attr = v.get(flag_category_attr, None)
if isinstance(bc_cat_attr, list):
bc_cat_list = bc_cat_attr
elif isinstance(bc_cat_attr, basestring):
bc_cat_list = [bc_cat_attr]
elif flag_category_default is not None:
bc_cat_list = flag_category_default
else:
continue
for bc_cat in bc_cat_list:
bc_dict[bc_cat].append(i)
for key, value in bc_dict.iteritems():
yield 'define category {}\n'.format(key)
for ind in value:
yield ind + '\n'
yield 'end\n'
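# Sketch of the emitted BlueCoat format (hypothetical category and indicator):
#   define category Malware
#   evil.example.com
#   end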
def generate_carbon_black(feed, start, num, desc, value, **kwargs):
zrange = SR.zrange
ilist = zrange(feed, 0, (1 << 32) - 1)
mm_to_cb = {"IPv4": "ipv4",
"domain": "dns",
"md5": "md5"}
ind_by_type = {"dns": [],
"md5": []}
# Let's stream the information as soon as we have it
yield "{\n\"feedinfo\": {\n"
cb_feed_info = CbFeedInfo(name=feed)
for cb_info_parts in cb_feed_info.iterate():
yield " " + cb_info_parts
yield "\n},\n\"reports\": [{"
report_args = dict()
report_args["id"] = feed + "_report"
    report_title = kwargs.get('rt', ["MineMeld Generated Report"])
if report_title is not None:
report_title = report_title[0]
report_args["title"] = report_title
report_score = kwargs.get('rs', None)
if report_score is not None:
try:
report_score = int(report_score[0])
except ValueError:
report_score = None
report_args["score"] = report_score
cb_report = CbReport(**report_args)
for cb_report_parts in cb_report.iterate():
yield " " + cb_report_parts
yield ", \"iocs\": {"
yield " \"ipv4\": ["
# Loop though all indicators
# Only indicators of type IPv4, domain and md5 can be exported to Carbon Black
ipv4_line = None
for i in ilist:
sleep(0)
v = SR.hget(feed + '.value', i)
v = None if v is None else json.loads(v)
if v is None:
continue
v_type = v.get("type", None)
if v_type not in mm_to_cb:
continue
if v_type in ("domain", "md5"):
ind_by_type[mm_to_cb[v_type]].append(i.lower())
continue
        # Carbon Black does not support IPv4 networks or ranges, so we must
        # expand them into individual addresses.
        ip_range = None
        if _IPV4_MASK_RE.match(i):
            ip_range = IPSet(IPNetwork(i))
        elif _IPV4_RANGE_RE.match(i):
            range_parts = i.split("-")
            ip_range = IPRange(range_parts[0], range_parts[1])
        if ip_range is None:
            continue
        for ip_addr in ip_range:
if ipv4_line is not None:
yield ipv4_line + ","
ipv4_line = "\"{}\"".format(str(ip_addr))
yield ("" if ipv4_line is None else ipv4_line) + "],"
yield "\"dns\": {},".format(json.dumps(ind_by_type["dns"]))
yield "\"md5\": {}".format(json.dumps(ind_by_type["md5"]))
yield "}}]}"
_FEED_FORMATS = {
'json': {
'formatter': generate_json_feed,
'mimetype': 'application/json'
},
'json-seq': {
'formatter': generate_json_feed,
'mimetype': 'application/json-seq'
},
'panosurl': {
'formatter': generate_panosurl_feed,
'mimetype': 'text/plain'
},
'mwg': {
'formatter': generate_mwg_feed,
'mimetype': 'text/plain'
},
'bluecoat': {
'formatter': generate_bluecoat_feed,
'mimetype': 'text/plain'
},
'carbonblack': {
'formatter': generate_carbon_black,
'mimetype': 'application/json'
},
'csv': {
'formatter': generate_csv_feed,
'mimetype': 'text/csv'
}
}
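# Example request sketch (hypothetical feed name 'myfeed'):
#   GET /feeds/myfeed?v=csv&f=indicator&f=type|Type&h=1
# streams a CSV with the header row "indicator,Type".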
@BLUEPRINT.route('/<feed>', methods=['GET'], feeds=True, read_write=False)
def get_feed_content(feed):
if not current_user.check_feed(feed):
return 'Unauthorized', 401
# check if feed exists
status = MMMaster.status()
tr = status.get('result', None)
if tr is None:
return jsonify(error={'message': status.get('error', 'error')})
nname = 'mbus:slave:' + feed
if nname not in tr:
return jsonify(error={'message': 'Unknown feed'}), 404
nclass = tr[nname].get('class', None)
if nclass != 'minemeld.ft.redis.RedisSet':
return jsonify(error={'message': 'Unknown feed'}), 404
start = request.values.get('s')
if start is None:
start = 0
try:
start = int(start)
if start < 0:
raise ValueError()
except ValueError:
LOG.error("Invalid request, s not a non-negative integer: %s", start)
return jsonify(error="s should be a positive integer"), 400
num = request.values.get('n')
if num is not None:
try:
num = int(num)
if num <= 0:
raise ValueError()
except ValueError:
LOG.error("Invalid request, n not a positive integer: %s", num)
return jsonify(error="n should be a positive integer"), 400
else:
num = None
desc = request.values.get('d')
    desc = desc is not None
value = request.values.get('v')
if value is not None and value not in _FEED_FORMATS:
return jsonify(error="unknown format %s" % value), 400
kwargs = {}
kwargs['translate_ip_ranges'] = int(request.values.get('tr', 0)) # generate IP ranges
# move to kwargs all the additional parameters, pop the predefined
kwargs.update(request.values.to_dict(flat=False))
kwargs.pop('s', None)
kwargs.pop('n', None)
kwargs.pop('d', None)
kwargs.pop('v', None)
kwargs.pop('tr', None)
mimetype = 'text/plain'
formatter = generate_plain_feed
if value in _FEED_FORMATS:
formatter = _FEED_FORMATS[value]['formatter']
mimetype = _FEED_FORMATS[value]['mimetype']
return Response(
stream_with_context(
formatter(feed, start, num, desc, value, **kwargs)
),
mimetype=mimetype
)
| 29.375661 | 119 | 0.563341 | 2,067 | 16,656 | 4.406386 | 0.184809 | 0.013834 | 0.035463 | 0.00527 | 0.306763 | 0.296004 | 0.27635 | 0.230786 | 0.230786 | 0.218928 | 0 | 0.019663 | 0.316042 | 16,656 | 566 | 120 | 29.427562 | 0.779846 | 0.106208 | 0 | 0.364078 | 0 | 0.004854 | 0.093449 | 0.016293 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029126 | false | 0 | 0.036408 | 0.002427 | 0.101942 | 0.004854 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
631360935e0f8afbe2d79b07ef45d939ae1e4a79 | 601 | py | Python | lib/image_processor.py | dipghoshraj/AR-filter | c04f04b20daf95d6bb8a552919e62d9ee047e8a2 | [
"MIT"
] | null | null | null | lib/image_processor.py | dipghoshraj/AR-filter | c04f04b20daf95d6bb8a552919e62d9ee047e8a2 | [
"MIT"
] | null | null | null | lib/image_processor.py | dipghoshraj/AR-filter | c04f04b20daf95d6bb8a552919e62d9ee047e8a2 | [
"MIT"
] | null | null | null | from PIL import Image
from lib.lips_marger import detection
import cv2, imutils, io
import numpy as np
def imageProcessor(image, colors):
"""
"""
blob = image.read()
B, G, R = colors.split(',')
b = io.BytesIO(blob)
pimg = Image.open(b).convert('RGB')
    # convert RGB to BGR, per OpenCV conventions
frame = cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
dataframe = detection(frame, B, G, R)
if dataframe is not None:
frame = dataframe
frame = imutils.resize(frame, width=300)
imgencode = cv2.imencode('.jpg', frame)[1]
return imgencode
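# Usage sketch (hedged; e.g. from a Flask upload handler):
#   buf = imageProcessor(request.files['image'], '0,0,255')  # BGR -> red lips
#   return Response(buf.tobytes(), mimetype='image/jpeg')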
| 23.115385 | 59 | 0.648918 | 83 | 601 | 4.674699 | 0.614458 | 0.010309 | 0.015464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019397 | 0.227953 | 601 | 25 | 60 | 24.04 | 0.81681 | 0.069884 | 0 | 0 | 0 | 0 | 0.014652 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6313b6c8dc1eaa78436bb7216b97a5cca8b95b3f | 1,149 | py | Python | app/db.py | SmileyJoe/github_backup | 61a42a89637e3a936921ed010d0e948953c4738b | [
"Apache-2.0"
] | null | null | null | app/db.py | SmileyJoe/github_backup | 61a42a89637e3a936921ed010d0e948953c4738b | [
"Apache-2.0"
] | 3 | 2021-07-11T21:17:33.000Z | 2021-12-13T21:01:22.000Z | app/db.py | SmileyJoe/github_backup | 61a42a89637e3a936921ed010d0e948953c4738b | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timezone
import os
import json
# Simple json file to be used for storing data between runs
class Db:
FILE = os.path.join(os.path.dirname(__file__), "db.json")
def __init__(self):
# if the file doesn't exist yet, set the defaults
if not os.path.isfile(self.FILE):
self._db = {
"last_run": datetime(1, 1, 1, 0, 0, 0, 0, timezone.utc).isoformat()
}
# else parse the json into a dict
else:
file = open(self.FILE, "r")
self._db = json.load(file)
file.close()
@property
    # ISO-format timestamp (UTC+0) of the last time the script ran;
    # used to skip repos that haven't been updated since then
def last_run(self):
return self._db["last_run"]
@last_run.setter
# sets the property from a datetime object
def last_run(self, last_run):
self._db["last_run"] = last_run.isoformat()
# save the json to the file
def save(self):
with open(self.FILE, 'w') as f:
json.dump(self._db, f, indent=4) | 31.916667 | 96 | 0.610096 | 177 | 1,149 | 3.841808 | 0.451977 | 0.082353 | 0.044118 | 0.057353 | 0.058824 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0.01107 | 0.292428 | 1,149 | 36 | 97 | 31.916667 | 0.825338 | 0.322019 | 0 | 0 | 0 | 0 | 0.042746 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.130435 | 0.043478 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63153d60b74afca5bf8c2c77ff2b3c78627e8dd5 | 13,444 | py | Python | calibration/util.py | uwgraphics/SPD-Geometric-Calibration | 2284fd6edbd265ded163deec85f47303e6a626f5 | [
"MIT"
] | null | null | null | calibration/util.py | uwgraphics/SPD-Geometric-Calibration | 2284fd6edbd265ded163deec85f47303e6a626f5 | [
"MIT"
] | null | null | null | calibration/util.py | uwgraphics/SPD-Geometric-Calibration | 2284fd6edbd265ded163deec85f47303e6a626f5 | [
"MIT"
] | null | null | null | import numpy as np
import csv
from scipy.optimize import minimize
from scipy.spatial.transform import Rotation as R
ID = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
def random_unit_vector():
"""Generate a random 3D unit vector
Returns:
np.array: a random 3D unit vector
"""
z = np.random.uniform(-1, 1)
theta = np.random.uniform(0, 2*np.pi)
return(np.array([
np.sqrt(1-z**2)*np.cos(theta),
np.sqrt(1-z**2)*np.sin(theta),
z
]))
def gen_observation(p, u, a, d, epsilon=1e-6):
"""Generate an observation from a point looking at a plane.
    Generates an observation (distance and intersection point) for a sensor at
    location p pointing in the direction given by the vector u toward the
    plane defined by a[0]x + a[1]y + a[2]z + d = 0.
    https://rosettacode.org/wiki/Find_the_intersection_of_a_line_with_a_plane#Python
    Args:
      p (3-tuple of floats): the position of the sensor (x, y, z).
      u (3-tuple of floats): the orientation of the sensor (x, y, z).
        Does not have to be a unit vector.
      a (3-tuple of floats): the normal of the plane where a[0]x + a[1]y + a[2]z + d = 0.
      d (float): the constant term of the plane equation.
    Returns:
      The distance and intersection point as a tuple, for example, with distance
      5.2 and intersection point (8.1, 0.3, 4):
      (5.2, (8.1, 0.3, 4)), or float('inf') if the sensor does not see the plane.
    Raises:
      ValueError: The plane is undefined (a == (0, 0, 0)).
"""
a = np.array(a)
p = np.array(p)
u = np.array(u)
if(a[0] != 0):
plane_point = np.array([-d/a[0], 0, 0])
elif(a[1] != 0):
plane_point = np.array([0, -d/a[1], 0])
elif(a[2] != 0):
plane_point = np.array([0, 0, -d/a[2]])
else:
raise ValueError("The plane with normal a=[0,0,0] is undefined")
ndotu = a.dot(u)
if abs(ndotu) < epsilon:
return float('inf')
w = p - plane_point
si = -a.dot(w) / ndotu
Psi = w + si * u + plane_point
dist = np.linalg.norm(Psi - p)
if(np.allclose((dist * u) + p, Psi)):
return (dist, Psi)
else:
return float('inf')
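# Worked example: a sensor at the origin looking down +x at the plane
# x - 10 = 0, i.e. gen_observation((0, 0, 0), (1, 0, 0), (1, 0, 0), -10),
# returns (10.0, array([10., 0., 0.])).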
def angle_between(u1, u2):
"""Get the angle between two unit vectors, in radians
Args:
u1: unit vector
u2: unit vector
Returns:
(float): angle between u1 and u2, in radians
"""
u1 = np.array(u1)
u2 = np.array(u2)
    assert(
        np.abs(np.linalg.norm(u1) - 1) < 0.0001
        and np.abs(np.linalg.norm(u2) - 1) < 0.0001
    )
angle = np.arccos(np.dot(u1, u2) / (np.linalg.norm(u1) * np.linalg.norm(u2)))
return angle
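# e.g. angle_between((1, 0, 0), (0, 1, 0)) == pi/2 for orthogonal unit vectors.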
def generate_motions(p, u, a, d, plane_center, bbox, radius=500, n=32):
"""Generate random robot motions that point sensor at plane
Generate n motions that keep the sensor at position p and orientation u
pointing at the plane given by a[0] + a[1] + a[2] + d = 0
Args:
p: 3D position of sensor on robot segment
u: heading unit vector for sensor
a: a vector for plane in equation ax+d=0
d: d scalar for plane in equation ax+d=0
plane_center: where on the plane to center the points you're aiming for
around
bbox: bounding box for the sensor, given as a 2D array with like so:
[
[xmin, xmax],
[ymin, ymax],
[zmin, zmax]
]
radius (default 500): how far from the center intersection points on the
plane should be
n (default 32): how many motions to generate
Returns:
(n x 4 x 4 array): robot motions as transforms in homogenous coordinates
"""
p = np.array(p)
u = np.array(u)
a = np.array(a)
# generate points on plane
xs = scattered_on_plane(a, d, plane_center.reshape(3), radius, n)
# generate positions for sensor in space
ps = []
while (len(ps) < len(xs)):
pt = np.array([
np.random.uniform(*bbox[0]),
np.random.uniform(*bbox[1]),
np.random.uniform(*bbox[2])
])
# check that pt is on the same side of the plane as the center of the robot
if (np.sign(np.dot(a,pt)+d) == np.sign(np.dot(a, np.array([0, 0, 0]))+d)):
# check that pt is at least 10cm away from the plane
if (np.abs(np.dot(a,pt)+d) > 100):
ps.append(pt)
# generate unit vectors that point sensor points to plane points
us = [(p - s) / np.linalg.norm(p - s) for p, s in zip(xs, ps)]
# convert list of points and unit vectors to list of transforms
tfs = points_to_transforms([p, *ps], [u, *us])
return tfs
def scattered_on_plane(a, d, center, radius, num_points):
"""Generate points scattered on the plane given by a, d
Args:
a: a parameter for plane (3D vector)
d: d parameter for plane
center: center point from which points are scattered
radius: radius of scattered points
num_points: number of scattered points
Returns:
(num_points x 3 array): coordinates of points on plane
"""
if(np.dot(a, center)+d > 0.00001):
raise ValueError("center is not on plane given by ax+d=0")
# generate a random vector and make it orthogonal to a
# https://stackoverflow.com/questions/33658620/generating-two-orthogonal-vectors-that-are-orthogonal-to-a-particular-direction
xvec = np.random.randn(3)
xvec -= xvec.dot(a) * a / np.linalg.norm(a)**2
xvec /= np.linalg.norm(xvec)
yvec = np.cross(a, xvec)
points = []
for _ in range(num_points):
xcomp = np.random.uniform(-radius, radius)
ycomp = np.random.uniform(-radius, radius)
points.append(center + (xcomp*xvec + ycomp*yvec))
return points
def points_to_transforms(points, units):
"""Convert a set of points to a set of transforms
Arguments:
points (list of 3-tuples): point positions (first is starting pos)
units (list of 3-tuples): unit vector directions (first is starting)
Returns:
(list of 4x4 np.array): transformations leading from starting point to
each other point (first will be identity)
"""
return([get_transform(points[0], units[0], pt, u) for pt, u in zip(points[1:], units[1:])])
def to_hom(vec):
"""Takes a numpy array and adds a 1 to the end of it
"""
return np.append(vec, [1])
def from_hom(vec):
"""Takes a numpy array and removes a 1 from the end of it
"""
return vec[:-1]
def get_transform(p1, u1, p2, u2):
"""Get the transform from pos. p1, rot. u1 to pos. p2, rot. u2
Arguments:
p1 (3-tuple): x, y, z coordinates of starting position
u1 (3-tuple): x, y, z coordinates of starting unit vector orientation
p2 (3-tuple): x, y, z coordinates of final position
u2 (3-tuple): x, y, z coordinates of final unit vector orientation
Returns:
(4x4 np.array): the transform from p1, u1 to p2, u2 in homogenous coord.
https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/476311#476311
"""
u1 = np.array(u1)
u2 = np.array(u2)
p1 = np.array(p1)
p2 = np.array(p2)
if(np.allclose(u1, u2)):
R = np.identity(3)
else:
v = np.cross(u1, u2)
s = np.linalg.norm(v)
        if(s == 0):
            # u1 and u2 are antiparallel (the parallel case was handled
            # above), so rotate 180 degrees about any axis orthogonal to u1
            axis = np.random.randn(3)
            axis -= axis.dot(u1) * u1 / np.linalg.norm(u1)**2
            axis /= np.linalg.norm(axis)
            R = 2 * np.outer(axis, axis) - np.identity(3)
else:
c = np.dot(u1, u2)
vx = np.array([
[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]
])
R = np.identity(3) + vx + (vx @ vx) * ((1 - c)/(s*s))
new_p = R @ p1
t = p2 - new_p
tf = np.array([
[R[0][0], R[0][1], R[0][2], t[0]],
[R[1][0], R[1][1], R[1][2], t[1]],
[R[2][0], R[2][1], R[2][2], t[2]],
[0, 0, 0, 1]
])
# tf * p1 should = p2
assert(np.allclose(tf @ np.append(p1, 1), np.append(p2, 1)))
# tf * u1 (with 0 for third coordinate - no translation) should = u2
assert(np.allclose(tf @ np.append(u1, 0), np.append(u2, 0)))
return tf
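# Worked example: get_transform((0, 0, 0), (1, 0, 0), (1, 2, 3), (0, 1, 0))
# returns a 90-degree rotation about +z (mapping heading +x onto +y) composed
# with a translation to (1, 2, 3).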
def read_data(data_path):
"""Read real-world trial data from given folder
Arguments:
data_path (string): path to folder containing measurements.csv and
transforms.csv files to be read in
Returns:
(tuple of arrays): measurements array and transforms array populated with
data from data_path/measurements.csv and data_path/transforms.csv
"""
with open(data_path + "/measurements.csv") as f:
csvfile = csv.reader(f)
measurements = []
for line in csvfile:
measurements.append(np.average([float(x) for x in line[1:]]))
measurements = np.array(measurements)
with open(data_path + "/transforms.csv") as f:
csvfile = csv.reader(f)
raw_transforms = []
for line in csvfile:
items = []
for item in line:
if(item != ' '):
items.append(float(item))
raw_transforms.append(np.reshape(np.array(items), (4,4)))
# change unit of transforms from meters to mm
transforms = [rescale_transform(tf, 1000) for tf in raw_transforms]
return(measurements, transforms)
def rescale_transform(tf, scale):
"""Rescale a 4x4 homogenous transform matrix by some factor
Arguments:
tf (4x4 np.array): the 4x4 homogenous transform matrix to scale
scale: how much to scale it by
Returns:
(4x4 np.array): scaled 4x4 homogenous transform matrix
"""
new_tf = np.array([
[tf[0][0], tf[0][1], tf[0][2], tf[0][3]*scale],
[tf[1][0], tf[1][1], tf[1][2], tf[1][3]*scale],
[tf[2][0], tf[2][1], tf[2][2], tf[2][3]*scale],
[tf[3][0], tf[3][1], tf[3][2], tf[3][3]],
])
return new_tf
def fit_plane(pts):
"""Fit a plane given by ax+d = 0 to a set of points
Works by minimizing the sum over all points x of ax+d
Arguments:
pts: array of points in 3D space
Returns:
(3x1 numpy array): a vector for plane equation
(float): d in plane equation
(float): sum of residuals for points to plane (orthogonal l2 distance)
"""
pts = np.array(pts)
def loss_fn(x, points):
a = np.array(x[:3])
d = x[3]
loss = 0
for point in points:
loss += np.abs(np.dot(a, np.array(point)) + d)
return loss
def a_constraint(x):
return np.linalg.norm(x[:3]) - 1
soln = minimize(
loss_fn,
np.array([0, 1, 0, 0]),
args=(pts),
method='slsqp',
constraints=[
{
'type': 'eq',
'fun': a_constraint
}
]
)
a = soln.x[:3]
d = soln.x[3]
res = soln.fun
return a, d, res
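# Sanity-check sketch (hedged; the result comes from an SLSQP solve): fitting
# points sampled from the plane y = 0 should give a close to (0, +/-1, 0) and
# d close to 0, with a small residual.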
def perturb_p(p, radius, symmetrical=False):
"""Perturb a point a random amount in each direction
Arguments:
(np.array) p: a point to be perturbed
(float) radius: amount to perturb in each direction (random)
(boolean) symmetrical: whether to return the symmetrical negative
version of each perturbation along with the positive version
Returns:
np.array: perturbed point
"""
if symmetrical:
return (
p + np.random.uniform(-radius, radius, 3),
p - np.random.uniform(-radius, radius, 3)
)
else:
return p + np.random.uniform(-radius, radius, 3)
def perturb_u(u, angle_range, symmetrical=False):
"""
Perturb a unit vector along a given angle range
Arguments:
(np.array) u: 3D unit vector to perturb
(float) angle_range: range of angles upon which to randomly perturb
within (uniform)
(boolean) symmetrical: whether to return the symmetrical negative
version of each perturbation along with the positive version
Returns:
(np.array): randomly perturbed unit vector
"""
    angle = np.random.uniform(-angle_range, angle_range)
    # generate a random vector and make it orthogonal to u;
    # this will be the axis for our axis-angle rotation
    # https://stackoverflow.com/questions/33658620/generating-two-orthogonal-vectors-that-are-orthogonal-to-a-particular-direction
    axis = np.random.randn(3)
    axis -= axis.dot(u) * u / np.linalg.norm(u)**2
    axis /= np.linalg.norm(axis)
    if symmetrical:
        rot1 = R.from_rotvec(np.radians(angle) * axis)
        rot2 = R.from_rotvec(np.radians(-angle) * axis)
        return(
            rot1.apply(u),
            rot2.apply(u)
        )
    else:
        rot = R.from_rotvec(np.radians(angle) * axis)
return(rot.apply(u)) | 30.905747 | 134 | 0.579738 | 2,047 | 13,444 | 3.774304 | 0.162677 | 0.034429 | 0.021745 | 0.01359 | 0.265856 | 0.21913 | 0.196868 | 0.17124 | 0.128009 | 0.116101 | 0 | 0.040072 | 0.29463 | 13,444 | 435 | 135 | 30.905747 | 0.774649 | 0.45299 | 0 | 0.19802 | 0 | 0 | 0.019862 | 0 | 0 | 0 | 0 | 0 | 0.014851 | 1 | 0.079208 | false | 0 | 0.019802 | 0.00495 | 0.173267 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
631a8ff40c9aa292f9c3fb3aba9c6eb6cc3e4d02 | 954 | py | Python | _scripts/manual/convert2decorations.py | Son-Guhun/Titan-Land-Lands-of-Plenty | edeca1d5437a7397195799ebf4b9585ee4609fed | [
"MIT"
] | 12 | 2019-05-27T16:02:28.000Z | 2021-01-08T09:32:08.000Z | _scripts/manual/convert2decorations.py | Son-Guhun/Titan-Land-Lands-of-Plenty | edeca1d5437a7397195799ebf4b9585ee4609fed | [
"MIT"
] | 209 | 2019-04-06T15:16:52.000Z | 2021-07-03T02:11:53.000Z | _scripts/manual/convert2decorations.py | Son-Guhun/Titan-Land-Lands-of-Plenty | edeca1d5437a7397195799ebf4b9585ee4609fed | [
"MIT"
] | 1 | 2021-05-26T12:13:35.000Z | 2021-05-26T12:13:35.000Z | """This script converts each CUSTOM unit in a list to a decoration, using 'h038' as the base unit.
Does not function for default units, as the _parent object field is overriden.
"""
import os
from myconfigparser import MyConfigParser, load_unit_data, get_decorations
fields_to_keep = [
'Name', 'EditorSuffix', 'Tip', 'Ubertip', 'Art', 'file'
]
units_list = 'h00J,h084,u05F,e010,e00Z,u018,e02D,o02E,h0HP,h06Q,h0NE,h06D,h00I,h11T,h06G,h0DX,h05W'
units_list = units_list.split(',')
def do(dataBase, units_list):
with open(dataBase) as f:
unit_data = load_unit_data(f)
for rawcode in units_list:
unit = unit_data[rawcode]
values = {k:unit[k] if k in unit else None for k in fields_to_keep}
unit_data[rawcode] = unit_data['h038']
for k,v in values.items():
if v is not None:
unit[k] = v
with open(dataBase, 'w') as f:
unit_data.write(f)
| 28.909091 | 99 | 0.649895 | 146 | 954 | 4.109589 | 0.527397 | 0.093333 | 0.04 | 0.036667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055172 | 0.240042 | 954 | 32 | 100 | 29.8125 | 0.772414 | 0.183438 | 0 | 0 | 0 | 0.052632 | 0.159533 | 0.108949 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
631d3f438b5ff1c52f372a1a15d8c2e168500814 | 17,781 | py | Python | PDF_Excel_Word/main.py | Tom-Stack-UX/Web-Scraper | e12ca5551dddb89ccf812620a35c469ea9287a77 | [
"MIT"
] | 1 | 2021-03-08T06:23:58.000Z | 2021-03-08T06:23:58.000Z | PDF_Excel_Word/main.py | Tom-Stack-UX/Web-Scraper | e12ca5551dddb89ccf812620a35c469ea9287a77 | [
"MIT"
] | null | null | null | PDF_Excel_Word/main.py | Tom-Stack-UX/Web-Scraper | e12ca5551dddb89ccf812620a35c469ea9287a77 | [
"MIT"
] | null | null | null | #! python3
# Your one stop destination for all your assignment needs
# Convert your assignments to PDFs, watermark them, merge them and encrypt them, all in one place
# Get to know the birthdays of your classmates and send them a birthday wish to show you care
# Convert your PDF into audiobook and learn on the go
# Import all the modules required for the program
import os, openpyxl, PyPDF2, pyttsx3, docx, logging, sys, time
from openpyxl.styles import Font, Color, NamedStyle, Alignment, Border, Side, colors
from docx2pdf import convert
from docx.enum.text import WD_ALIGN_PARAGRAPH
from datetime import datetime #retrieve date and time
from termcolor import colored #For adding colored text
#Function to clear the output screen
def screen_clear():
_ = os.system('cls')
'''
birthdays.xlsx has the names and birth days of all students in the class
We pass "birthdays" as the filename.
Here we retrieve the names of those having birthdays in the current month.
We can also find out the birthday of any person present in the class
'''
def excel_operations(filename):
screen_clear()
if not os.path.exists(filename + ".xlsx"):
logging.error(colored('File does not exist', 'red')) # if file does not exist, return the control
return
screen_clear() # clear the screen
wb = openpyxl.load_workbook(filename + '.xlsx')
# takes in the filename and returns a value of the workbook data type
sheet = wb['Sheet1'] # Worksheet object
#Freeze the first row so that it is
#always visible to the user even as they scroll through the spreadsheet
sheet.freeze_panes = 'A2'
#Set font styles to the first row
boldFont = Font(bold=True)
center_align = Alignment(horizontal="center", vertical="center")
border_text = Border(bottom=Side(border_style="thin"))
header_row = sheet[1]
for cell in header_row:
cell.font = boldFont
cell.alignment = center_align
cell.border = border_text
names = [] # To store names of those having birthdays in the current month
bday = {} # To store the birthdays of the class people
count = 0
for row in range(2, sheet.max_row + 1):
# Retrieve values of the cells from the sheet
name = sheet['A' + str(row)].value
day = sheet['B' + str(row)].value
month = sheet['C' + str(row)].value
year = sheet['D' + str(row)].value
#Create the birthday of each person in dd-mm-yyyy format
birthday = str(day) + '-' + month + '-' + str(year)
#Store the birthday in the bday dictionary
bday[name] = birthday
currentMonth = datetime.now().strftime('%B') #Obtain the current month, say December
# If there is a birthday in the current month, set the value of the cell to True, else False
if currentMonth == month:
sheet['E' + str(row)] = 'True'
names.append(name)
count += 1
else:
sheet['E' + str(row)] = 'False'
# Display the name of the person having birthday in the current month
    if count == 0:
        print("There aren't any birthdays in this month")
    elif count == 1:
        print("There is " + str(count) + " person with a birthday in this month")
        for name in names:
            print(colored(name, "green"))
    else:
        print("There are " + str(count) + " people with birthdays in this month")
        for name in names:
            print(colored(name, "green"))
# Users can retrive the birthdays of other people as well
print("Do you want to find out someone's birthday? (Enter 1 if YES)")
try:
choice = int(input())
    except ValueError:
logging.warning(colored('You should have entered a number', 'red')) # if input is not an integer
print("\n")
return
if choice == 1:
print("Enter the name")
person = input()
person = person.title() # To get the names in the format it is stored in the spreadsheet
try:
print(person + "'s birthday is on " + str(bday[person]))
        except KeyError:
print(person + " isn't part of your class")
wb.save('birthdays.xlsx') #We save our changes done in the birthdays.xlsx file
print('\n' * 2)
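# Expected Sheet1 layout for excel_operations (as read by the code above):
#   column A: name, B: day, C: month name (e.g. 'December'), D: year;
#   column E is written back as 'True'/'False' for a birthday this month.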
'''
We can create a birthday card for any person.
The card gets stored in the birthday_wish.docx file.
You can then print this card and send it to your friends
'''
def birthday_card():
screen_clear() #clear the screen
doc = docx.Document()
print("Enter the name of the birthday girl/boy")
name = input()
name = name.title()
p = doc.add_paragraph("Happy Birthday " + name, "Title")
p.alignment = WD_ALIGN_PARAGRAPH.CENTER #Align the birthday wish at the center of the document
doc.add_picture('bday.png', width=docx.shared.Inches(6), height=docx.shared.Cm(12)) #Add a birthday card
doc.add_page_break()
doc.save('birthday_wish.docx') # Save the birthday card in birthday_wish.docx file
print(colored('Birthday Card has been generated', 'blue'))
print('\n' * 2)
'''
Customise a class schedule for every person in the class.
All you have to do is enter the sem, section and names of the students.
The schedules can be found in the schedules.docx file in the current working directory.
There is a schedule for each student on a new page, so you can open a single Word document
and print all schedules together.
'''
def schedule_generator():
screen_clear() #clear the screen
doc = docx.Document() # Returns a Document object
print("Enter the semester")
sem = input()
print("Enter the section")
section = input()
names = [] # To store names of the students
print("List the names of the students")
while True:
print("Enter name of the student" + str(len(names) + 1) + " or enter nothing to stop")
name = input()
if name == '':
break
names.append(name) # Add the names to the list
# Create a schedule for each student in the schedules.docx file
for i in range(len(names)):
p = doc.add_paragraph(names[i] + "'s Class Schedule", "Title")
p.alignment = WD_ALIGN_PARAGRAPH.CENTER
p = doc.add_paragraph("Sem: " + sem + " Section: " + section, "Normal")
p.alignment = WD_ALIGN_PARAGRAPH.CENTER
doc.add_picture('schedule.png', width=docx.shared.Inches(6), height=docx.shared.Cm(12))
doc.add_page_break()
print(colored('Schedules are completed', 'blue'))
doc.save('schedules.docx') # save to schedules.docx file
print('\n' * 2)
'''
It is always better to watermark your documents and assignments with your USN or name.
All you have to do is save your watermarked PDF as watermark.pdf in the current working directory.
You can then pass in any PDf file into the program and apply your watermark to the desired pages of your document.
You can pass "hackers" as the filename. Specify the starting and ending page numbers, say 2 and 5.
You can give the filename where you want your watermarked PDF stored.
'''
def pdf_watermark(filename):
screen_clear() # clear the screen
if not os.path.exists(filename + ".pdf"):
logging.error(colored('File does not exist', 'red'))
return
pdfFile = open(filename + '.pdf', 'rb') # open the file in read binary mode
    pdfReader = PyPDF2.PdfFileReader(pdfFile) # PdfFileReader object represents the PDF file
print("Enter the page you want to start the watermark from")
try:
page_start = int(input())
    except ValueError:
logging.warning(colored('You should have entered a number', 'red')) # if input is not a number
print("\n")
return
print("Enter the page you want to end the watermark")
try:
page_end = int(input())
    except ValueError:
logging.warning(colored('You should have entered a number', 'red')) # if input is not a number
print("\n")
return
# If user enters a number greater than the pages present
if page_end > pdfReader.numPages:
logging.warning(colored('You have exceeded the total number of pages present', 'yellow'))
return
print("What do you want your new filename to be? (Avoid existing filenames)")
userfilename = input()
pdfWatermarkReader = PyPDF2.PdfFileReader(open('watermark.pdf', 'rb')) # open the watermark.pdf file
pdfWriter = PyPDF2.PdfFileWriter() # PDFFileWriter object
# We loop through the pages can watermark the pages specified by the users
# We then loop through the remaining pages and add the pages to the new file
for pageNum in range(page_start - 1):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
for pageNum in range(page_start-1, page_end):
pageObj = pdfReader.getPage(pageNum)
pageObj.mergePage(pdfWatermarkReader.getPage(0))
pdfWriter.addPage(pageObj)
for pageNum in range(page_end, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
resultPdfFile = open(userfilename+'.pdf', 'wb') # Store the watermarked file in the filename given for the new file
pdfWriter.write(resultPdfFile)
pdfFile.close()
resultPdfFile.close()
print(colored('PDF file has been watermarked', 'blue'))
print('\n' * 2)
'''
Convert your Word assignments into PDF.
You can find the PDf file in your current working directory
'''
def word_converter(filename):
screen_clear()
if not os.path.exists(filename + ".docx"):
logging.error(colored('File does not exist', 'red')) # If file does not exist
return
    # Convert the Word document to a PDF file with the same base name
    convert(filename + ".docx", filename + ".pdf")
print(colored('Word file is converted to PDF', 'blue'))
print('\n' * 2)
'''
Here we can merge the desired PDF files. This code merges all the pages of a PDF together.
You can pass in "harry_potter" and "manual" in the filename.
Specify the new filename where you want the merged PDFs to get stored.
'''
def pdf_merger():
screen_clear()
print("What do you want your new filename to be? (Avoid existing filenames)")
userfilename = input()
files = []
# List all the files needed to be merged
print("List the files to be merged")
while True:
print("Enter name of the file" + str(len(files) + 1) + " or enter nothing to stop")
name = input()
if name == '':
break
if not os.path.exists(name + ".pdf"):
logging.error(colored('File does not exist', 'red')) # If file does not exist
continue
files.append(name + ".pdf")
pdfWriter = PyPDF2.PdfFileWriter() # PDFFileWriter object
for filename in files:
pdfFileObj = open(filename, 'rb') # open each file and add the pages to the PDFFileWriter object
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
for pageNum in range(0, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
# Save the resulting PDF to a file.
pdfOutput = open(userfilename+'.pdf', 'wb')
pdfWriter.write(pdfOutput)
pdfOutput.close()
print(colored('PDFs are merged together', 'blue'))
print('\n' * 2)
'''
Convert your PDF into an audiobook by passing the filename.
You can't pause the audiobook in between. You need to close the program if you wish to stop the reading.
However, once the reading is done, it returns back to the menu.
'''
def pdf_speak(filename):
screen_clear()
if not os.path.exists(filename + ".pdf"):
logging.error(colored('File does not exist', 'red'))
return
# Display warning to the user
print(colored('Warning : ', 'yellow'), colored('You cannot pause the audiobook in between', 'yellow'))
print("Do you want to continue?")
answer = input()
answer = answer.lower()
if answer == 'yes':
print("\n")
book = open(filename + '.pdf', 'rb') # Open the file to be converted into audiobook
pdfReader = PyPDF2.PdfFileReader(book)
pages = pdfReader.numPages # Read the total number of pages
speaker = pyttsx3.init()
for num in range(0, pages):
page = pdfReader.getPage(num) # Get each page
text = page.extractText()
print(colored(text, "magenta")) # Display the text to the user
speaker.say(text) # Read out the text
speaker.runAndWait()
speaker.stop() # Stop the reading
print(colored('Reading is completed', 'blue'))
print('\n' * 2)
'''
Secure your PDF files with an encryption and save it as a new file.
You can pass "harry_potter" as the filename. Give the new filename and the password to encrypt.
The encrypted file can be found in the filename you have given.
Before anyone can view the encrypted PDF, they’ll have to enter the password.
'''
def pdf_encrypt(filename):
screen_clear() # Clear the screen
if not os.path.exists(filename + ".pdf"):
logging.error(colored('File does not exist', 'red')) # File does not exist
return
pdfReader = PyPDF2.PdfFileReader(open(filename + '.pdf', 'rb'))
pdfWriter = PyPDF2.PdfFileWriter()
print("What do you want your new filename to be? (Avoid existing filenames)")
userfilename = input()
for pageNum in range(pdfReader.numPages):
pdfWriter.addPage(pdfReader.getPage(pageNum)) # copy the pages to the PDFFileWriter object
# Ask for encryption
print("Enter the password")
password = input()
pdfWriter.encrypt(password) # Encrypt the file with the password
resultPdf = open(userfilename+'.pdf', 'wb')
pdfWriter.write(resultPdf)
print(colored('Your file has been encrypted', 'blue'))
resultPdf.close()
print('\n' * 2)
'''
This is to verify the PDF's password.
You can pass in the filename (the name of the encrypted file) from the previous option to check the password
(or any encrypted file from the cwd).
'''
def pdf_decrypt(filename):
screen_clear() # clear the screen
if not os.path.exists(filename + ".pdf"):
logging.error(colored('File does not exist', 'red')) # The file does not exist
return
pdfReader = PyPDF2.PdfFileReader(open(filename + '.pdf', 'rb'))
# If file is not encrypted, return the control
if not pdfReader.isEncrypted:
print(colored("Your PDF file is not encrypted. Pass in an encrypted filenames", "red"))
return
print("Enter the password")
password = input()
pdfReader.decrypt(password) # Decrypt the encrypted file with the PDF
try: # if file has been decrypted
pageobj = pdfReader.getPage(0)
print(colored('Your file is decrypted. The password matches', 'blue'))
    except Exception:
logging.warning(colored("Couldn't decrypt the file. Kindly check the password entered", "red"))
print('\n' * 2)
# Menu to ask the user's choice and perform corresponding operations
choice = 1
while(choice):
screen_clear() # clear the screen
print(colored("-------------MENU-------------", "magenta"))
print(colored("Enter 1 to find about birthdays", "cyan"))
print(colored("Enter 2 to print your class schedule", "cyan"))
print(colored("Enter 3 for watermarking your PDF", "cyan"))
print(colored("Enter 4 to convert your Word file to PDF", "cyan"))
print(colored("Enter 5 to merge the desired PDF files together", "cyan"))
print(colored("Enter 6 to convert your PDF into audiobook", "cyan"))
print(colored("Enter 7 to secure your PDF file with an encryption", "cyan"))
print(colored("Enter 8 if you want to check your PDF password", "cyan"))
print(colored("Enter 9 to wish your classmate on their birthday", "cyan"))
print(colored("Enter 0 to quit", "cyan"))
try:
choice = int(input())
    except ValueError:
logging.warning(colored('You should have entered a number', 'red')) # if number is not entered
time.sleep(2)
continue
print("\n")
print(colored("Please pass only the filename. The extensions will be added by the program.", "yellow"))
# Perform necessary actions basis user's choice
if choice == 1:
print(colored("Enter the filename", "green"))
filename = input()
excel_operations(filename)
if choice == 2:
schedule_generator()
if choice == 3:
print(colored("Enter the filename", "green"))
filename = input()
pdf_watermark(filename)
if choice == 4:
print(colored("Enter the filename", "green"))
filename = input()
word_converter(filename)
if choice == 5:
pdf_merger()
if choice == 6:
print(colored("Enter the filename", "green"))
filename = input()
pdf_speak(filename)
if choice == 7:
print(colored("Enter the filename", "green"))
filename = input()
        pdf_encrypt(filename)
if choice == 8:
print(colored("Enter the filename", "green"))
filename = input()
        pdf_decrypt(filename)
if choice == 9:
birthday_card()
if choice == 0:
sys.exit('Thank you')
print(colored("Do you want to continue? (Enter 1 for Yes and 0 for No)", "yellow"))
try:
option = int(input())
    except ValueError:
        logging.warning(colored('You should have entered a number', 'red'))
        print("\n")
        continue
if option == 1:
continue
else:
sys.exit("Thank you") | 34.796477 | 119 | 0.646139 | 2,433 | 17,781 | 4.693383 | 0.172626 | 0.033628 | 0.02382 | 0.016814 | 0.327875 | 0.265347 | 0.23601 | 0.209651 | 0.186882 | 0.140117 | 0 | 0.005644 | 0.252629 | 17,781 | 511 | 120 | 34.796478 | 0.853638 | 0.169844 | 0 | 0.434505 | 0 | 0 | 0.227395 | 0.002455 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031949 | false | 0.035144 | 0.019169 | 0 | 0.086262 | 0.220447 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
631df5fbfda8dff689bb2e9f472b865d8e576c52 | 3,845 | py | Python | CBOW_nn.py | yuhonglu-bu/BA865-CBOW-nn-model | 4c972c60c455c6cdbf9d266cdc83ef3c6392b60f | [
"MIT"
] | 2 | 2020-03-01T02:34:22.000Z | 2020-03-01T19:40:04.000Z | CBOW_nn.py | yuhonglu-bu/BA865-CBOW-nn-model | 4c972c60c455c6cdbf9d266cdc83ef3c6392b60f | [
"MIT"
] | null | null | null | CBOW_nn.py | yuhonglu-bu/BA865-CBOW-nn-model | 4c972c60c455c6cdbf9d266cdc83ef3c6392b60f | [
"MIT"
] | null | null | null | # Data cleaning
import pandas as pd
import string
def clean_text(text):
x = text.translate(str.maketrans('', '', string.punctuation)) # remove punctuation
x = x.lower().split() # lower case and split by whitespace to differentiate words
return x
example_text = pd.read_csv('https://raw.githubusercontent.com/dylanwalker/BA865/master/datasets/hw3.csv')
cleaned_text = example_text.Review[:100].apply(clean_text)
#Create vocab and word_to_index
vocab = set()
for review in cleaned_text:
vocab.update(set(review))
word_to_index = {word: i for i, word in enumerate(vocab)}
# Define make_cbow_data function
def make_cbow_data(text, window_size):
cbow_data = []
for review in text:
for i in range(window_size, len(review) - window_size):
target = review[i]
context_index = list(range(i - window_size, i + window_size + 1))
context_index.remove(i)
context = []
for index in context_index:
context.append(review[index])
cbow_data.append((context, target))
return cbow_data
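# Example: for the cleaned review ['the', 'cake', 'was', 'very', 'good'] and
# window_size=2, make_cbow_data yields the single pair
# (['the', 'cake', 'very', 'good'], 'was').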
# Define CBOW model here
import torch
from torch import nn, optim
from torch.autograd import Variable
import torch.nn.functional
class CBOW(nn.Module):
def __init__(self, vocab_size, embed_dim, window_size, hidden_dim):
super(CBOW, self).__init__()
self.embedding = nn.Embedding(vocab_size, embed_dim)
self.fc1 = nn.Linear(2 * window_size * embed_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, vocab_size)
def forward(self, x):
x = self.embedding(x)
x = x.view(1, -1)
x = self.fc1(x)
x = nn.functional.relu(x, inplace = True)
x = self.fc2(x)
        x = nn.functional.log_softmax(x, dim=1)
return x
# Train the CBOW model
## Parameters
VOCAB_SIZE = len(vocab)
EMBED_DIM = 100
WINDOW_SIZE = 2
HIDDEN_DIM = 30
N_EPOCHS = 3
## Train CBOW model here
cbow_data = make_cbow_data(cleaned_text, WINDOW_SIZE)
cbow_model = CBOW(VOCAB_SIZE, EMBED_DIM, WINDOW_SIZE, HIDDEN_DIM)
if torch.cuda.is_available():
cbow_model = cbow_model.cuda()
criterion = nn.NLLLoss()  # forward() already returns log-probabilities
optimizer = optim.SGD(cbow_model.parameters(), lr = 0.001)
loss_data = []
for epoch in range(N_EPOCHS):
running_loss = 0
for word in cbow_data:
context, target = word
context = Variable(torch.LongTensor([word_to_index[i] for i in context]))
target = Variable(torch.LongTensor([word_to_index[target]]))
if torch.cuda.is_available():
context = context.cuda()
target = target.cuda()
output = cbow_model(context)
loss = criterion(output, target)
running_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_epoch = running_loss / len(cbow_data)
loss_data.append(loss_epoch)
print(loss_epoch)
# Plot losses vs epoch
import matplotlib.pyplot as plt
epoch = list(range(1, N_EPOCHS + 1))
plt.plot(epoch, loss_data)
plt.xlabel('Number of epochs')
plt.ylabel('Loss')
plt.show()
# Print five synonyms
from math import sqrt
from pandas.core.frame import DataFrame
embed = cbow_model.embedding.weight.data.cpu().numpy()
def CosDistance(a, b):
mul=0
ma=0
mb=0
for i in range(len(a)):
mul += a[i]*b[i]
ma += a[i]*a[i]
mb += b[i]*b[i]
cos = mul/sqrt(ma*mb)
return cos
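# Note: despite its name, CosDistance returns the cosine *similarity*, e.g.
# CosDistance([1, 0], [0, 1]) == 0.0 and CosDistance([1, 2], [2, 4]) == 1.0.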
cos_dis_embed = []
for vec in embed:
    cos_dis = CosDistance(embed[word_to_index['delicious']], vec)
    cos_dis_embed.append(cos_dis)
cos_dis_index = list(range(0, len(cos_dis_embed)))
cosine_distance = dict(zip(cos_dis_index, cos_dis_embed))
cosine_distance_sort = DataFrame(sorted(cosine_distance.items(), key = lambda item:item[1], reverse = True))
top5_index = cosine_distance_sort[0][1:6]
for index in range(5):
print('The top', index + 1, 'synonyms is', list(word_to_index.keys())[list(word_to_index.values()).index(top5_index[index + 1])]) | 30.515873 | 131 | 0.697009 | 589 | 3,845 | 4.356537 | 0.286927 | 0.038971 | 0.030008 | 0.019875 | 0.091193 | 0.05456 | 0.028059 | 0.028059 | 0 | 0 | 0 | 0.013338 | 0.181014 | 3,845 | 126 | 131 | 30.515873 | 0.801524 | 0.069961 | 0 | 0.039604 | 0 | 0 | 0.034241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049505 | false | 0 | 0.089109 | 0 | 0.188119 | 0.019802 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
631f2dcffa93fc649bbb00a9f41555c789b5524d | 776 | py | Python | hardware_control/src/speed_cmd_listener.py | tooploox/car_robot | 2c4ba83643b0ff3478d6887bb0a0e7c2d0c0bef2 | [
"MIT"
] | null | null | null | hardware_control/src/speed_cmd_listener.py | tooploox/car_robot | 2c4ba83643b0ff3478d6887bb0a0e7c2d0c0bef2 | [
"MIT"
] | 13 | 2021-04-10T09:44:37.000Z | 2021-04-10T12:37:39.000Z | hardware_control/src/speed_cmd_listener.py | tooploox/car_robot | 2c4ba83643b0ff3478d6887bb0a0e7c2d0c0bef2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from motors_driver import SteeringMotor, DriveMotor, TamiyaVehicle
steering = SteeringMotor(pwm_pin=33, pwm_min=6.5,
pwm_max=10.5,
pwm_init_duty_cycle=8.45,
pwm_neutral=8.45)
drive = DriveMotor(pwm_pin=32, pwm_min=5.5, pwm_max=8.5,
pwm_init_duty_cycle=7,
pwm_neutral=7)
robot = TamiyaVehicle(steering_motor=steering, drive_motor=drive)
def callback_receive(msg):
    robot.move(msg.angular.z, msg.linear.x)
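# Manual test sketch (hedged): drive the robot from a shell with, e.g.
#   rostopic pub -1 /cmd_vel geometry_msgs/Twist \
#     '{linear: {x: 0.0}, angular: {z: 0.5}}'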
if __name__ == "__main__":
rospy.init_node('cmd_listener')
sub = rospy.Subscriber('/cmd_vel', Twist, callback_receive, queue_size=1)
rospy.spin()
| 25.866667 | 77 | 0.649485 | 106 | 776 | 4.45283 | 0.54717 | 0.033898 | 0.029661 | 0.050847 | 0.072034 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037736 | 0.248711 | 776 | 29 | 78 | 26.758621 | 0.77187 | 0.025773 | 0 | 0 | 0 | 0 | 0.037086 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.166667 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
631f79e61cbca55e944bb046c466e1f4a5690d3b | 4,475 | py | Python | multiframe_star/database/genDB.py | aasensio/DeepLearning | 71838115ce93e0ca96c8314cff3f07de1d64c235 | [
"MIT"
] | null | null | null | multiframe_star/database/genDB.py | aasensio/DeepLearning | 71838115ce93e0ca96c8314cff3f07de1d64c235 | [
"MIT"
] | null | null | null | multiframe_star/database/genDB.py | aasensio/DeepLearning | 71838115ce93e0ca96c8314cff3f07de1d64c235 | [
"MIT"
] | null | null | null | import numpy as np
import h5py
import scipy.io as io
import sys
import scipy.special as sp
import pyfftw
from astropy import units as u
import matplotlib.pyplot as pl
from ipdb import set_trace as stop
from soapy import confParse, SCI, atmosphere
def progressbar(current, total, text=None, width=30, end=False):
"""Progress bar
Args:
current (float): current value of the bar
total (float): total of the bar
text (string): additional text to show
width (int, optional): number of spaces of the bar
end (bool, optional): end character
Returns:
None: None
"""
bar_width = width
block = int(round(bar_width * current/total))
text = "\rProgress {3} : [{0}] {1} of {2}".\
format("#"*block + "-"*(bar_width-block), current, total, text)
if end:
text = text +'\n'
sys.stdout.write(text)
sys.stdout.flush()
def generate_training(n_patches, n_patches_validation, n_stars, n_frames):
# Size of final images
nx = 256
ny = 256
n_zernike = 40
# GREGOR
telescope_radius = 1.44 * 1.440 * u.meter
secondary_radius = 0.404 * 0.404 * u.meter
pixSize = (6.0/512.0) * u.arcsec / u.pixel
lambda0 = 850.0 * u.nm
fov = 3.0 * u.arcsec
border = 100
f_images = h5py.File('database.h5', 'w')
f_images_validation = h5py.File('database_validation.h5', 'w')
database_images = f_images.create_dataset('intensity', (n_patches, n_frames+1, nx, ny, 1), 'f')
database_images_validation = f_images_validation.create_dataset('intensity', (n_patches_validation, n_frames+1, nx, ny, 1), 'f')
loop = 0
loop_val = 0
# load a sim config that defines lots of science cameras across the field
config = confParse.loadSoapyConfig('sh_8x8.py')
# Init a science camera
sci_camera = SCI.PSF(config, mask=np.ones((154,154)))
# init some atmosphere
atmos = atmosphere.atmos(config)
##############
# Training set
##############
for i in range(n_patches):
        progressbar(i, n_patches, text='Progress (training set)')
star_field = np.zeros((nx, ny))
indx = np.random.randint(border, nx-border)
indy = np.random.randint(border, ny-border)
star_field[indx, indy] = 1.0
# Save original image in file
database_images[i,0,:,:,0] = star_field
star_field_fft = pyfftw.interfaces.numpy_fft.fft2(star_field)
for j in range(n_frames):
# Get phase for this time step
phase_scrns = atmos.moveScrns()
# Calculate all the PSF for this turbulence
psf = sci_camera.frame(phase_scrns)
nx_psf, ny_psf = psf.shape
psf_roll = np.roll(psf.data, int(nx_psf/2), axis=0)
psf_roll = np.roll(psf_roll, int(ny_psf/2), axis=1)
psf_fft = pyfftw.interfaces.numpy_fft.fft2(psf_roll)
image_final = np.real(pyfftw.interfaces.numpy_fft.ifft2(psf_fft * star_field_fft))
database_images[i,j+1,:,:,0] = image_final
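        # Extra atmosphere steps between patches, presumably to decorrelate the
        # turbulence seen by successive training samples.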
for j in range(50):
phase_scrns = atmos.moveScrns()
##############
# Validation set
##############
for i in range(n_patches_validation):
progressbar(i, n_patches_validation, text='Progress (validation set)')
star_field = np.zeros((nx, ny))
indx = np.random.randint(border, nx-border)
indy = np.random.randint(border, ny-border)
star_field[indx, indy] = 1.0
# Save original image in file
database_images_validation[i,0,:,:,0] = star_field
star_field_fft = pyfftw.interfaces.numpy_fft.fft2(star_field)
for j in range(n_frames):
# Get phase for this time step
phase_scrns = atmos.moveScrns()
# Calculate all the PSF for this turbulence
psf = sci_camera.frame(phase_scrns)
nx_psf, ny_psf = psf.shape
psf_roll = np.roll(psf.data, int(nx_psf/2), axis=0)
psf_roll = np.roll(psf_roll, int(ny_psf/2), axis=1)
psf_fft = pyfftw.interfaces.numpy_fft.fft2(psf_roll)
image_final = np.real(pyfftw.interfaces.numpy_fft.ifft2(psf_fft * star_field_fft))
database_images_validation[i,j+1,:,:,0] = image_final
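        # Same decorrelation burn-in as in the training loop above.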
for j in range(50):
phase_scrns = atmos.moveScrns()
f_images.close()
f_images_validation.close()
if (__name__ == '__main__'):
generate_training(100, 5, 1, 1) | 30.862069 | 136 | 0.621229 | 633 | 4,475 | 4.21327 | 0.273302 | 0.040495 | 0.047244 | 0.053993 | 0.490439 | 0.467942 | 0.467942 | 0.440945 | 0.440945 | 0.440945 | 0 | 0.030075 | 0.256983 | 4,475 | 145 | 137 | 30.862069 | 0.77203 | 0.141676 | 0 | 0.375 | 0 | 0 | 0.042439 | 0.005909 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.125 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
632063c753358993a36421249041214f9269f354 | 285 | py | Python | Proyecto1/AndresA/Project/logParser.py | jasago/SOA2022-1 | 39f142f786887e50eae85e9f90b4a6194164bdc1 | [
"MIT"
] | null | null | null | Proyecto1/AndresA/Project/logParser.py | jasago/SOA2022-1 | 39f142f786887e50eae85e9f90b4a6194164bdc1 | [
"MIT"
] | null | null | null | Proyecto1/AndresA/Project/logParser.py | jasago/SOA2022-1 | 39f142f786887e50eae85e9f90b4a6194164bdc1 | [
"MIT"
] | 11 | 2022-02-22T21:38:08.000Z | 2022-03-02T04:52:35.000Z | f = open("pylint.log", "r")
report = 0
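# Scan the pylint log for the summary line containing "/10"
# (e.g. "Your code has been rated at 8.50/10") and extract the numeric grade.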
for line in f:
if "/10" in line:
report = line
break
f.close()
init = report.index("at ")
end = report.index("/")
grade = report[init:end]
space = grade.index(" ")
grade = grade[space+1:end]
grade = float(grade)
print(grade)
| 17.8125 | 27 | 0.6 | 44 | 285 | 3.886364 | 0.522727 | 0.128655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017937 | 0.217544 | 285 | 15 | 28 | 19 | 0.748879 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6321ef085b7a328b010ba23147704ee0cb32f764 | 15,211 | py | Python | vespa/interfaces/cli_batch/analysis_cli_oneil_results_v1.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | null | null | null | vespa/interfaces/cli_batch/analysis_cli_oneil_results_v1.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | 4 | 2021-04-17T13:58:31.000Z | 2022-01-20T14:19:57.000Z | vespa/interfaces/cli_batch/analysis_cli_oneil_results_v1.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | 3 | 2021-06-05T16:34:57.000Z | 2022-01-19T16:13:22.000Z | # Python modules
import os
import sys
# 3rd party modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, NullFormatter
# Our modules
import vespa.analysis.util_import as util_import
import vespa.common.util.ppm as util_ppm
import vespa.common.util.misc as util_misc
SUPPORTED = ['wbnaa', 'siemens dicom']
DESC = \
"""Command line interface to process MRS data in Vespa-Analysis.
Data filename, preset file name, data type string and CSV output
file name values are all required for this command to function
properly.
Note. You may have to enclose data/preset/output strings in double
quotation marks for them to process properly if they have
spaces or other special characters embedded in them.
"""
#mpl.rc('text', usetex=True)
#mpl.rcParams['text.usetex']=True
#mpl.rcParams['text.latex.unicode']=True
def analysis_cli_oneil_results(dataset, csvfile, viffpath='',
vespa_version='',
timestamp='',
verbose=False, debug=False):
if viffpath:
viffname = os.path.basename(viffpath)
else:
viffname = 'none'
raw = dataset.blocks["raw"]
data_source = raw.get_data_source(dataset.all_voxels)
data_source = os.path.basename(data_source)
dim0, dim1, dim2, dim3 = dataset.spectral_dims
sw = dataset.sw
voxel = dataset.all_voxels
key = 'fit'
if key in list(dataset.blocks.keys()):
block = dataset.blocks[key]
results = block.chain.run(voxel, entry='output_refresh')
else:
msg = """This dataset has no 'fit' block, returning."""
print(msg, file=sys.stderr)
print(msg, file=sys.stdout)
sys.exit(-1)
freq = results['data'].copy()
base = results['fit_baseline'].copy()
base.shape = 1, base.shape[0]
yfit = results['yfit'].copy()
yfits = results['yfit'].copy() if len(yfit.shape)==1 else np.sum(yfit, axis=0)
if len(yfit.shape) == 1:
yfit.shape = 1,yfit.shape[0]
# Print Control Setting
outbase = 'D:\\Users\\bsoher\\myplot'
fontname = 'Courier New' # 'Consolas' 'Calibri' 'Courier New' 'Times New Roman'
savetype = 'pdf' # 'svg' 'eps' 'pdf' 'png' 'raw' 'rgba' 'ps' 'pgf' etc.
minplot, maxplot = 0.1, 4.9 # in ppm
xvals = [dataset.pts2ppm(val) for val in range(dim0)]
minppm, maxppm = xvals[-1], xvals[0]
minplot = minplot if minplot >= minppm else minppm
maxplot = maxplot if maxplot <= maxppm else maxppm
imin = int(dataset.ppm2pts(maxplot))
imax = int(dataset.ppm2pts(minplot))
tmin = np.round((minplot+0.5)) # integer ppm just above minppm
tmax = np.round((maxplot-0.5)) # integer ppm just under maxppm
# Create the figure
fig = plt.figure(figsize=(11,8.5))
plt.subplots_adjust(hspace=0.001)
fsupported = plt.gcf().canvas.get_supported_filetypes()
if savetype not in list(fsupported.keys()):
msg = r"Output file format '%s' not supported by current Matplotlib backend, Returning." % savetype
print(msg, file=sys.stderr)
print(msg, file=sys.stdout)
sys.exit(-1)
outname = outbase+'.'+savetype
nullfmt = NullFormatter() # no labels
left, bottom = 0.05, 0.05 # set up for 8.5x11 landscape printout
w1, w2 = 0.55, 0.35
h1, h2, h3 = 0.07, 0.61, 0.07
hpad, vpad = 0.02, 0.001
rect1 = [left, bottom+h1+h2, w1, h3]
rect2 = [left, bottom+h1, w1, h2]
rect3 = [left, bottom, w1, h1] # xmin, ymin, dx, and dy
rect4 = [left+w1+hpad, bottom, w2, h1+h2+h3]
dat1 = freq[imin:imax] - (yfits+base[0,:])[imin:imax]
dat1 = dat1.real
min1, max1 = min(dat1),max(dat1)
delt1 = (max1 - min1)*0.8
min1, max1 = min1 - delt1, max1 + delt1
# tmin1, tmax1 = int(min1+0.5), int(max1-0.5)
# step1 = int((tmax1 - tmin1)/4)
ax1 = fig.add_axes(rect1) # xmin, ymin, dx, and dy
ax1.xaxis.set_major_formatter(nullfmt) # no x labels, have to go before plot()
ax1.plot(xvals[imin:imax],dat1)
ax1.set_ylabel('some numbers', fontsize=8.0)
plt.yticks([0.0,max1])
plt.ylim(min1, max1)
plt.xticks(np.arange(tmin, tmax, 1.0))
plt.xlim(maxplot, minplot)
ax2 = fig.add_axes(rect2) # xmin, ymin, dx, and dy
ax2.xaxis.set_major_formatter(nullfmt) # no x labels, have to go before plot()
ax2.plot(xvals[imin:imax],freq[imin:imax])
ax2.plot(xvals[imin:imax],(yfits+base[0,:])[imin:imax])
ax2.plot(xvals[imin:imax],(base[0,:])[imin:imax])
ax2.set_ylabel('some more numbers', fontsize=8.0)
plt.yticks(np.arange(-2.0, 13.0, 4.0))
plt.ylim(-3, 14)
plt.xticks(np.arange(tmin, tmax, 1.0))
plt.xlim(maxplot, minplot)
ax3 = fig.add_axes(rect3) # xmin, ymin, dx, and dy
ax3.plot(xvals[imin:imax],(base[0,:])[imin:imax])
ax3.set_ylabel('original numbers', fontsize=8.0)
ax3.set_xlabel('Chemical Shift [ppm]', fontsize=8.0)
plt.yticks(np.arange(-2.0, 13.0, 4.0))
plt.ylim(-3, 14)
plt.xticks(np.arange(tmin, tmax, 1.0))
plt.xlim(maxplot, minplot)
for ax in [ax1,ax2,ax3]:
ax.xaxis.set_major_locator(MultipleLocator(1)) # this is for even distrib across plot
ax.xaxis.set_minor_locator(MultipleLocator(0.2))
ax.yaxis.set_major_locator(MultipleLocator(4.0))
# ax.xaxis.set_major_formatter(FormatStrFormatter('%d')) # yticks() above is finer grained
# ax.yaxis.set_minor_locator(MultipleLocator(2.0))
ax.grid(which='major', axis='x', linewidth=0.50, linestyle='-', color='0.75')
ax.grid(which='minor', axis='x', linewidth=0.25, linestyle=':', color='0.75')
ax.grid(which='major', axis='y', linewidth=0.25, linestyle=':', color='0.75')
ax.grid(which='minor', axis='y', linewidth=0.25, linestyle=':', color='0.75')
    ax4 = fig.add_axes(rect4, facecolor='g')   # xmin, ymin, dx, and dy ('axisbg' was removed in Matplotlib 2.x)
ax4.xaxis.set_major_formatter(nullfmt)
ax4.yaxis.set_major_formatter(nullfmt)
ax4.axis('off')
nrow = 10
clust_data = np.round(np.random.random((nrow,3)),3)
collabel=("col 1", "col 2", "col 3")
rowlabel=[str(i+1) for i in range(nrow)]
the_table = ax4.table(cellText=clust_data,
cellLoc='left',
colLoc='left',
colWidths=[0.3,0.3,0.3],
colLabels=collabel,
rowLoc='center',
#rowLabels=rowlabel,
fontsize=8.0,
loc='upper center')
the_table.auto_set_font_size(False)
the_table.set_fontsize(7.0)
table_props = the_table.properties()
cheight = table_props['children'][0].get_height() # all start with same default
keys = list(table_props['celld'].keys())
for key in keys: # use cell dict here to test for col/row labels
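        # visible_edges uses Matplotlib's cell edge codes B(ottom), L(eft),
        # R(ight), T(op); the header row (key[0] == 0) gets heavier rules.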
cell = table_props['celld'][key]
cell.set_height(1.1*cheight)
cell.get_text().set_fontname(fontname)
if key[0] == 0:
if key[1] == 0:
cell.visible_edges = 'BLT'
elif key[1] == 2:
cell.visible_edges = 'BRT'
else:
cell.visible_edges = 'BT'
cell.set_linewidth(1.0)
cell.set_linestyle('-')
cell.get_text().set_fontweight('bold')
else:
if key[1] == 0:
cell.visible_edges = 'BL'
elif key[1] == 2:
cell.visible_edges = 'BR'
else:
cell.visible_edges = 'B'
cell.set_linewidth(0.25)
cell.set_linestyle('-')
# Retrieve an element of a plot and set properties
for tick in ax3.xaxis.get_ticklabels():
tick.set_fontsize(8.0)
tick.set_fontname(fontname)
tick.set_color('gray')
tick.set_weight('bold')
for ax in [ax1,ax2,ax3]:
ax.yaxis.label.set_fontname(fontname)
for tick in ax.yaxis.get_ticklabels():
tick.set_fontsize(8.0)
tick.set_fontname(fontname)
tick.set_color('gray')
tick.set_weight('normal')
msg = "Vespa-Analysis Version: %s Processing Timestamp: %s" % (vespa_version, timestamp)
plt.figtext(0.03, 0.95, msg,
wrap=True,
horizontalalignment='left',
fontsize=8,
fontname=fontname)
msg = "VIFF File : %s \nData Source : %s" % (viffname, data_source)
plt.figtext(0.03, 0.90, msg,
wrap=True,
horizontalalignment='left',
fontsize=10,
fontname=fontname)
fig.canvas.draw()
fig.savefig(outname, pad_inches=0.5)#, bbox_inches=(6,8)) #'tight')
    bob = 10   # no-op leftovers, handy only as a debugger breakpoint anchor
    bob += 1
# # Save results to CSV file --------------------------------------
#
# if verbose: print """Saving results to CSV file "%s". """ % csvfile
#
# fit = dataset.blocks["fit"]
# data_source = dataset.blocks["raw"].get_data_source(voxel)
#
# val, hdr = fit.results_as_csv(voxel[0], fit.chain.fitted_lw,
# fit.chain.minmaxlw[0],
# fit.chain.minmaxlw[1],
# data_source, outxml)
# nhdr = len(hdr)
# val = ",".join(val)
# hdr = ",".join(hdr)
# val += "\n"
# hdr += "\n"
#
# hdr_flag = True
# if os.path.isfile(csvfile):
# with open(csvfile, 'r+') as f:
# data = f.readlines()
# if len(data)>1:
# last = data[-1]
# nlast = len(last.split(','))
# if nlast == nhdr:
# hdr_flag = False
#
# with open(csvfile, 'a') as f:
# if hdr_flag:
# f.write(hdr)
# f.write(val)
def _open_viff(datafile):
datasets = []
filename = datafile
timestamp = ''
msg = ""
try:
importer = util_import.DatasetCliImporter(filename)
except IOError:
msg = """I can't read the file "%s".""" % filename
except SyntaxError:
msg = """The file "%s" isn't valid Vespa Interchange File Format.""" % filename
if msg:
print(msg, file=sys.stderr)
print(msg, file=sys.stdout)
sys.exit(-1)
else:
# Time to rock and roll!
dsets, timestamp = importer.go()
for item in dsets:
datasets.append(item)
if datasets:
for dataset in datasets:
if dataset.id == datasets[-1].id:
dataset.dataset_filename = filename
# dataset.filename is an attribute set only at run-time
# to maintain the name of the VIFF file that was read in
# rather than deriving a filename from the raw data
# filenames with *.xml appended. But we need to set this
# filename only for the primary dataset, not the associated
# datasets. Associated datasets will default back to their
# raw filenames if we go to save them for any reason
else:
dataset.dataset_filename = ''
return datasets, timestamp
def main():
verbose = True
datatype = 'siemens dicom'
# Processing of SVS_EDIT_OFF files
STARTDIR = 'D:\\Users\\bsoher\\projects\\2017_oneil_ucla_lipid_contam_svs\\MRS_RAW_DATA\\_all_svs_edit_off' # \\fitted_pass1'
csvfile = STARTDIR+'\\bjs_csv_output_file_off.txt'
# # Processing of SVS_EDIT_DIFF files
# STARTDIR = 'D:\\Users\\bsoher\\projects\\2017_oneil_ucla_lipid_contam_svs\\MRS_RAW_DATA\\_all_svs_edit_diff\\fitted_pass1'
# csvfile = STARTDIR+'\\bjs_csv_output_file_diff.txt'
# # this gets all files *.IMA in all subdirectories of STARTDIR
# imafiles = []
# for dirpath, dirnames, filenames in os.walk(STARTDIR):
# for filename in [f for f in filenames if f.endswith(".xml")]:
# imafiles.append(os.path.join(dirpath, filename))
# print os.path.join(dirpath, filename)
vespa_version = util_misc.get_vespa_version()
i = 0
imafiles = ['D:\\Users\\bsoher\\temp\\dmx\\test_5076.0004.0002.xml',]
for datafile in imafiles:
# Test input arguments for consistency --------------------------
msg = ''
if not os.path.isfile(datafile):
msg = """Main DATAFILE does not exist "%s".""" % datafile
if msg:
print(msg, file=sys.stderr)
print(msg, file=sys.stdout)
sys.exit(-1)
if not os.path.isfile(csvfile):
if verbose:
pass
print("""Output CSV file will be created - %s""" % csvfile)
# Load Main Dataset --------------------------
# if verbose: print """%s - Load Data into a Dataset object - %s""" % (str(i), datafile)
dataset, timestamp = _open_viff(datafile)
dataset = dataset[-1]
print(str(i)+' : '+' - '+datafile)
analysis_cli_oneil_results(dataset, csvfile, viffpath=datafile,
vespa_version=vespa_version,
timestamp=timestamp,
verbose=verbose,
debug=False)
i += 1
if i >= 1: break # debug statement to exit after one file processed
    bob = 10   # no-op leftovers, handy only as a debugger breakpoint anchor
    bob += 1
if __name__ == '__main__':
main()
# # no labels
# nullfmt = NullFormatter() # no labels
#
# ax4.xaxis.set_major_formatter(nullfmt) # have to go before plot()
# ax4.yaxis.set_major_formatter(nullfmt) # have to go before plot()
# ax4.plot(freq[1000:1800])
# plt.xticks(np.arange(100, 800, 200))
# plt.xlim(0, 800)
# # Gets rid of xaxis label for ax1 and ax2
# xticklabels = ax1.get_xticklabels() + ax2.get_xticklabels()
# plt.setp(xticklabels, visible=False)
# # Annotate example with mathtext and how to set color with 3 term RGB vector
# mpl_grey_rvb = (51./255., 51./255., 51./255.)
# mpl_grey_rvb = (1./255., 1./255., 1./255.)
# tmp2 =r"$\mathrm{Roman}\ , \ \mathit{Italic}\ , \ \mathtt{Typewriter} \, \ \mathrm{or}\ \mathcal{CALLIGRAPHY}$"
# plt.annotate(tmp2, xy=(110.0, 7.0),
# xycoords='data',
# color=mpl_grey_rvb,
# fontsize=7) | 35.210648 | 129 | 0.548813 | 1,896 | 15,211 | 4.310654 | 0.272679 | 0.010767 | 0.011746 | 0.014682 | 0.261471 | 0.22244 | 0.192096 | 0.155757 | 0.138138 | 0.128227 | 0 | 0.037729 | 0.320426 | 15,211 | 432 | 130 | 35.210648 | 0.752926 | 0.278877 | 0 | 0.228216 | 0 | 0 | 0.087498 | 0.019242 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012448 | false | 0.004149 | 0.041494 | 0 | 0.058091 | 0.041494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6322fbe0f7eb323cafcdb83afbb0005a6c5ad08a | 560 | py | Python | examples/src/Text/GetTextFromSmartArtNode.py | aspose-slides/Aspose.Slides-for-Python-via-.NET | c55ad5c71f942598f1e67e22a52cbcd1cb286467 | [
"MIT"
] | null | null | null | examples/src/Text/GetTextFromSmartArtNode.py | aspose-slides/Aspose.Slides-for-Python-via-.NET | c55ad5c71f942598f1e67e22a52cbcd1cb286467 | [
"MIT"
] | null | null | null | examples/src/Text/GetTextFromSmartArtNode.py | aspose-slides/Aspose.Slides-for-Python-via-.NET | c55ad5c71f942598f1e67e22a52cbcd1cb286467 | [
"MIT"
] | null | null | null | import aspose.slides as slides
# ExStart:GetTextFromSmartArtNode
# The path to the documents directory.
dataDir = "./examples/data/"
outDir = "./examples/out/"
with slides.Presentation(dataDir + "smart_art_access.pptx") as presentation:
slide = presentation.slides[0]
smartArt = slide.shapes[0]
smartArtNodes = smartArt.all_nodes
for smartArtNode in smartArtNodes:
for nodeShape in smartArtNode.shapes:
if nodeShape.text_frame is not None:
print(nodeShape.text_frame.text)
# ExEnd:GetTextFromSmartArtNode | 31.111111 | 76 | 0.726786 | 64 | 560 | 6.28125 | 0.640625 | 0.064677 | 0.089552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004396 | 0.1875 | 560 | 18 | 77 | 31.111111 | 0.879121 | 0.175 | 0 | 0 | 0 | 0 | 0.11329 | 0.045752 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6324e5d7c136080bea9df30dea6828c8bf4b3896 | 2,538 | py | Python | tests/test_storages.py | ValentinATA/sirius-sdk-python | b26aea94f6fc94980a4c9be6d2b8f2345c875964 | [
"Apache-2.0"
] | 9 | 2020-08-10T11:57:35.000Z | 2022-03-18T21:45:36.000Z | tests/test_storages.py | ValentinATA/sirius-sdk-python | b26aea94f6fc94980a4c9be6d2b8f2345c875964 | [
"Apache-2.0"
] | 3 | 2021-03-12T22:42:27.000Z | 2021-05-18T11:46:01.000Z | tests/test_storages.py | ValentinATA/sirius-sdk-python | b26aea94f6fc94980a4c9be6d2b8f2345c875964 | [
"Apache-2.0"
] | 7 | 2020-10-30T15:54:45.000Z | 2022-02-28T06:59:59.000Z | import uuid
import pytest
from sirius_sdk import Agent
from sirius_sdk.agent.storages import InWalletImmutableCollection
from sirius_sdk.storages import InMemoryKeyValueStorage, InMemoryImmutableCollection
@pytest.mark.asyncio
async def test_inmemory_kv_storage():
kv = InMemoryKeyValueStorage()
await kv.select_db('db1')
await kv.set('key1', 'value1')
value = await kv.get('key1')
assert value == 'value1'
await kv.select_db('db2')
await kv.set('key1', 1000)
value = await kv.get('key1')
assert value == 1000
await kv.select_db('db1')
value = await kv.get('key1')
assert value == 'value1'
await kv.delete('key1')
value = await kv.get('key1')
assert value is None
await kv.delete('unknown-key')
@pytest.mark.asyncio
async def test_inmemory_immutable_collection():
collection = InMemoryImmutableCollection()
await collection.select_db('db1')
await collection.add('Value1', {'tag1': 'tag-val-1', 'tag2': 'tag-val-2'})
await collection.add('Value2', {'tag1': 'tag-val-1', 'tag2': 'tag-val-3'})
fetched1 = await collection.fetch({'tag1': 'tag-val-1'})
assert len(fetched1) == 2
fetched1 = await collection.fetch({'tag2': 'tag-val-2'})
assert len(fetched1) == 1
assert fetched1[0] == 'Value1'
await collection.select_db('db2')
fetched3 = await collection.fetch({})
assert len(fetched3) == 0
@pytest.mark.asyncio
async def test_inwallet_immutable_collection(agent1: Agent):
await agent1.open()
try:
collection = InWalletImmutableCollection(agent1.wallet.non_secrets)
value1 = {
'key1': 'value1',
'key2': 10000
}
value2 = {
'key1': 'value2',
'key2': 50000
}
await collection.select_db(db_name=uuid.uuid4().hex)
await collection.add(value1, {'tag': 'value1'})
await collection.add(value2, {'tag': 'value2'})
fetched, count = await collection.fetch({'tag': 'value1'})
assert count == 1
assert len(fetched) == 1
assert fetched[0] == value1
fetched, count = await collection.fetch({'tag': 'value2'})
assert count == 1
assert len(fetched) == 1
assert fetched[0] == value2
fetched, count = await collection.fetch({})
assert count == 2
await collection.select_db(db_name=uuid.uuid4().hex)
fetched, count = await collection.fetch({})
assert count == 0
finally:
await agent1.close()
| 27.586957 | 84 | 0.630418 | 299 | 2,538 | 5.277592 | 0.230769 | 0.142586 | 0.08872 | 0.038023 | 0.420152 | 0.397338 | 0.326996 | 0.160963 | 0.160963 | 0.108999 | 0 | 0.045735 | 0.233255 | 2,538 | 91 | 85 | 27.89011 | 0.765159 | 0 | 0 | 0.279412 | 0 | 0 | 0.091411 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0 | false | 0 | 0.073529 | 0 | 0.073529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6324fcccbfb3635c79ba9115694e16374b17205d | 9,199 | py | Python | paddlenlp/ops/distributed/parallel.py | wzzju/PaddleNLP | 1757a4fc2a3cd5a45f75c6482746777752b414d8 | [
"Apache-2.0"
] | 6 | 2021-06-08T13:19:35.000Z | 2021-06-24T15:08:54.000Z | paddlenlp/ops/distributed/parallel.py | xihuanafeng/PaddleNLP | 14c3209118b2cadcce9a8f66b760c9cddb3a02ad | [
"Apache-2.0"
] | null | null | null | paddlenlp/ops/distributed/parallel.py | xihuanafeng/PaddleNLP | 14c3209118b2cadcce9a8f66b760c9cddb3a02ad | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from paddle.fluid.framework import in_dygraph_mode
from paddle.distributed.fleet import fleet
__all__ = [
'guard',
'ParallelEmbedding',
'ParallelLinear',
'ColumnParallelLiner',
'RowParallelLiner',
]
def guard(device):
def decorator(Layer):
class WrapperClass(Layer):
def __init__(self, *args, **kw):
with paddle.static.device_guard(device):
print("Init {} on {}".format(Layer.__name__, device))
super().__init__(*args, **kw)
def forward(self, *args, **kw):
with paddle.static.device_guard(device):
print("Forward {} on {}".format(Layer.__name__, device))
return super().forward(*args, **kw)
return WrapperClass
return decorator
class ParallelEmbedding(nn.Layer):
"""
Parallel Embedding
"""
def __init__(self,
num_embeddings,
embedding_dim,
num_partitions,
padding_idx=None,
weight_attr=None,
name=None):
super().__init__()
size = (num_embeddings, embedding_dim)
if in_dygraph_mode():
rank = paddle.distributed.get_rank()
nranks = paddle.distributed.get_world_size()
else:
assert fleet._role_maker, ("To use paddle.distributed.split, "
"you must call fleet.init() firstly.")
rank = fleet.worker_index()
nranks = fleet.worker_num()
# rank within a model parallel group
inner_rank = rank % num_partitions
self.inner_rank = inner_rank
self.num_partitions = num_partitions
per_part_size = (size[0] + num_partitions - 1) // num_partitions
last_part_size = size[0] - per_part_size * (num_partitions - 1)
if inner_rank == num_partitions - 1: per_part_size = last_part_size
per_part_size += 1 # make the last row as the padding index
self.origin_size = size
if not name:
self.name = "emb_rank_%d" % inner_rank
else:
self.name = name + "_rank_%d" % inner_rank
self.per_part_embeddings = per_part_size
self.origin_num_embeddings = self.origin_size[0]
self.weight_attr = weight_attr
self.embedding = paddle.nn.Embedding(
self.per_part_embeddings,
self.origin_size[1],
padding_idx=self.per_part_embeddings - 1,
sparse=False,
weight_attr=self.weight_attr,
name=self.name)
self.embedding.weight.is_distributed = True
# Alias for nn.Embedding
self.weight = self.embedding.weight
startup_block = paddle.static.default_startup_program().global_block()
main_block = paddle.static.default_main_program().global_block()
startup_block.vars[self.embedding.weight.name].is_distributed = True
main_block.vars[self.embedding.weight.name].is_distributed = True
def forward(self, x):
origin_input_shape = x.shape
if len(origin_input_shape) == 2:
x = paddle.unsqueeze(x, axis=-1)
else:
assert origin_input_shape[-1] == 1, (
"The last dimension size of x must be 1.")
x_shard = paddle.shard_index(x, self.origin_num_embeddings,
self.num_partitions, self.inner_rank,
self.per_part_embeddings - 1)
if len(origin_input_shape) == 2:
x_shard = paddle.squeeze(x_shard, axis=-1)
emb_out = self.embedding(x_shard)
paddle.distributed.all_reduce(emb_out, group=None)
return emb_out
class ParallelLinear(nn.Layer):
"""
Parallel Linear
"""
def __init__(self,
size,
axis,
num_partitions=1,
gather_out=True,
param_attr=None,
bias_attr=None,
name=None):
super().__init__()
if in_dygraph_mode():
rank = paddle.distributed.get_rank()
nranks = paddle.distributed.get_world_size()
else:
assert fleet._role_maker, ("To use paddle.distributed.split, "
"you must call fleet.init() firstly.")
rank = fleet.worker_index()
nranks = fleet.worker_num()
# rank within a model parallel group
inner_rank = rank % num_partitions
self.axis = axis
if axis == 0:
assert size[0] % num_partitions == 0, (
"Number of rows of the weight for linear ({}) must be"
" divisible by num_partitions ({})".format(size[0],
num_partitions))
self.per_part_size = size[0] // num_partitions
linear_size = (self.per_part_size, size[1])
elif axis == 1:
assert size[1] % num_partitions == 0, (
"Number of column of the weight for linear ({}) must be"
" divisible by num_partitions ({})".format(size[1],
num_partitions))
self.per_part_size = size[1] // num_partitions
linear_size = (size[0], self.per_part_size)
else:
raise ValueError("The value of axis must be 0 or 1, but the value "
"given is {}.".format(axis))
num_rows, num_cols = linear_size
self.gather_out = gather_out
self.axis = axis
if not name:
name = "fc_by_row_rank_%d" % inner_rank if axis == 0 else "fc_by_col_rank_%d" % inner_rank
else:
name = name + "_by_row_rank_%d" % inner_rank if axis == 0 else name + "_by_col_rank_%d" % inner_rank
self.linear = paddle.nn.Linear(
num_rows,
num_cols,
weight_attr=param_attr,
bias_attr=bias_attr,
name=name)
weight = self.linear.weight
weight.is_distributed = True
# alias for weight tensor
self.weight = self.linear.weight
startup_block = paddle.static.default_startup_program().global_block()
main_block = paddle.static.default_main_program().global_block()
startup_block.vars[weight.name].is_distributed = True
main_block.vars[weight.name].is_distributed = True
# set is_distributed for splited bias
# if a linear layer is splited by row, each rank would hold a complete bias
# if a linear layer is splited by col, the bias would also be split into each rank as its weight
if axis == 1 and self.linear._bias_attr != False:
startup_block.vars[self.linear.bias.name].is_distributed = True
main_block.vars[self.linear.bias.name].is_distributed = True
def forward(self, x):
if self.axis == 0:
assert x.shape[-1] == self.per_part_size, (
"The width ({}) of the input "
"x must be equal to the height ({}) of the weight. Maybe you "
"should split the input x using paddle.split.".format(
x.shape[-1], self.per_part_size))
linear_out = self.linear(x)
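        # Row-parallel (axis == 0): each rank holds a partial sum, so all_reduce.
        # Column-parallel (axis == 1): each rank holds a slice of the output
        # features, so all_gather and concatenate along the last axis.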
if self.gather_out:
if self.axis == 0:
paddle.distributed.all_reduce(linear_out)
else:
output = []
paddle.distributed.all_gather(output, linear_out)
linear_out = paddle.concat(
output, axis=len(linear_out.shape) - 1)
return linear_out
class ColumnParallelLiner(ParallelLinear):
def __init__(self,
size,
num_partitions,
param_attr=None,
bias_attr=None,
name=None):
super().__init__(
size,
axis=1,
num_partitions=num_partitions,
gather_out=False,
param_attr=param_attr,
bias_attr=bias_attr)
class RowParallelLiner(ParallelLinear):
def __init__(self,
size,
num_partitions,
param_attr=None,
bias_attr=None,
name=None):
super().__init__(
size,
axis=0,
num_partitions=num_partitions,
gather_out=True,
param_attr=param_attr,
bias_attr=bias_attr)
| 36.503968 | 112 | 0.573541 | 1,076 | 9,199 | 4.650558 | 0.182156 | 0.064948 | 0.024181 | 0.016787 | 0.484213 | 0.436051 | 0.385891 | 0.329536 | 0.288769 | 0.253397 | 0 | 0.008209 | 0.337863 | 9,199 | 251 | 113 | 36.649402 | 0.813331 | 0.106533 | 0 | 0.394737 | 0 | 0 | 0.088394 | 0.006121 | 0 | 0 | 0 | 0 | 0.031579 | 1 | 0.052632 | false | 0 | 0.021053 | 0 | 0.126316 | 0.010526 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
632592561e16a3a6878ac053e05932a7d980049d | 2,255 | py | Python | mcm/monte2.py | ant6/Monte-Carlo-Modulators | 94118d746cc0416e7cc083a5e55f90e396db0876 | [
"MIT"
] | null | null | null | mcm/monte2.py | ant6/Monte-Carlo-Modulators | 94118d746cc0416e7cc083a5e55f90e396db0876 | [
"MIT"
] | null | null | null | mcm/monte2.py | ant6/Monte-Carlo-Modulators | 94118d746cc0416e7cc083a5e55f90e396db0876 | [
"MIT"
] | null | null | null | from os.path import join
import time
from mcm.lottery import random_positions
from mcm.measurements import roll_peak_to_val, where_is_this_val, sum_peak_to_one
from mcm.peak_reader import read_one_peak
import numpy as np
def quality(sum_peak):
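    # Note: `begin` and `end` are module-level ppm bounds set in __main__ below.
    # The score penalises signal outside [begin, end] and deviation from 1 inside.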
domain = sum_peak[0]
values = sum_peak[1]
ind_begin = where_is_this_val(begin, domain)
ind_end = where_is_this_val(end, domain)
pre_penalty = np.abs(values[0:ind_begin].sum())
post_penalty = np.abs(values[ind_end:-1].sum())
middle_sector = np.array(values[ind_begin:ind_end]) - 1
middle_penalty = np.abs(middle_sector.sum())
return pre_penalty + middle_penalty + post_penalty
if __name__ == '__main__':
# load peak data
domain = read_one_peak(join("..", "data", "domain.dat"))
peak1_vals = read_one_peak(join("..", "data", "rs0.dat"))
peak2_vals = read_one_peak(join("..", "data", "rs3000.dat"))
peak3_vals = read_one_peak(join("..", "data", "rs6000.dat"))
peak1 = np.array([domain, peak1_vals])
peak2 = np.array([domain, peak2_vals])
peak3 = np.array([domain, peak3_vals])
peak_list = [peak1, peak2, peak3, peak1, peak1, peak3]
t_start = time.time()
k = 0
k_end = 1000
r = 0
begin = 5
end = 15
results = []
best_peaks = []
best = 999999
while k < k_end:
k += 1
lottery_positions = random_positions(peak_list)
peaks_to_sum = []
for i in range(6):
peaks_to_sum.append(roll_peak_to_val(peak_list[i], lottery_positions[i]))
# calculate sum peak and check condition score
result_peak = sum_peak_to_one(peaks_to_sum)
result_peak[1] /= (result_peak[1].max())
qnew = quality(result_peak)
results.append((k, qnew))
if qnew < best:
best = qnew
best_peaks.append(result_peak)
print('best:', len(best_peaks), best, ' added.')
if k % 100 == 0:
print("Step: %d, sample score: %.2f" % (k, qnew))
t_end = time.time()
print("Computed in %.2f" % (t_end - t_start))
# dump data
import pickle
with open("mc.p", "w+b") as f:
pickle.dump(results, f)
with open("best_mc.p", "w+b") as f2:
pickle.dump(best_peaks, f2) | 28.910256 | 85 | 0.623503 | 334 | 2,255 | 3.934132 | 0.302395 | 0.031963 | 0.041857 | 0.045662 | 0.077626 | 0.052511 | 0 | 0 | 0 | 0 | 0 | 0.032768 | 0.242129 | 2,255 | 78 | 86 | 28.910256 | 0.736103 | 0.030599 | 0 | 0 | 0 | 0 | 0.065964 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.122807 | 0 | 0.157895 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63265bb15f2ffc980691d7667d2086d4f37c59f2 | 2,275 | py | Python | subsevenzip/archive.py | dsvensson/subsevenzip-python | 078c3876e5fd1ffd84e57ae992a142d247a5832f | [
"0BSD"
] | 3 | 2015-01-05T14:09:25.000Z | 2018-03-21T19:33:15.000Z | subsevenzip/archive.py | dsvensson/subsevenzip-python | 078c3876e5fd1ffd84e57ae992a142d247a5832f | [
"0BSD"
] | null | null | null | subsevenzip/archive.py | dsvensson/subsevenzip-python | 078c3876e5fd1ffd84e57ae992a142d247a5832f | [
"0BSD"
] | null | null | null | # Copyright (c) 2015, Daniel Svensson <dsvensson@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
import lzma
import io
import builtins
from .parser import parse_headers
from .buffer import ReadBuffer
from .codec import open_lzma_stream
class SevenZipArchive(object):
def __init__(self, factory, files):
self._factory = factory
self._stream = None
self.files = files
def get_content(self, file):
if not self._stream:
self._stream = self._factory()
try:
self._stream.seek(file._offset)
return self._stream.read(file.size)
except lzma.LZMAError:
# Seems like it's not possible to seek backward
self._stream = self._factory()
self._stream.seek(file._offset)
return self._stream.read(file.size)
def open(arg):
if isinstance(arg, bytes):
close_fd = -1
buf = ReadBuffer(io.BytesIO(arg))
elif isinstance(arg, str):
close_fd = builtins.open(arg, "rb")
buf = ReadBuffer(close_fd)
elif hasattr(arg, "read") or hasattr(arg, "write"):
close_fd = -1
buf = ReadBuffer(arg)
else:
raise ValueError("Can only open a SevenZip archive from filename, bytes, or a file descriptor")
archive = parse_headers(buf)
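    # stream_factory opens a fresh decompression stream over the compressed
    # payload; SevenZipArchive re-creates it when a backward seek is needed.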
def stream_factory():
buf.seek(archive["payload_offset"], io.SEEK_SET)
stream = buf.get_sub_stream(archive["compressed_size"])
return open_lzma_stream(stream, archive["codec_properties"])
return SevenZipArchive(stream_factory, archive["files"])
| 35.546875 | 103 | 0.692747 | 306 | 2,275 | 5.03268 | 0.460784 | 0.051948 | 0.027273 | 0.027273 | 0.094805 | 0.067532 | 0.067532 | 0.067532 | 0.067532 | 0.067532 | 0 | 0.003409 | 0.226374 | 2,275 | 63 | 104 | 36.111111 | 0.871591 | 0.351209 | 0 | 0.205128 | 0 | 0 | 0.093087 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
632ae8cb57d3a6dadb0c0b2643a11345f2a5873c | 617 | py | Python | topicsketch/fast_hashing.py | zengjichuan/topicsketch | 87583224a8f8a01f9f3075197e13380c3dffbf0c | [
"Apache-2.0"
] | null | null | null | topicsketch/fast_hashing.py | zengjichuan/topicsketch | 87583224a8f8a01f9f3075197e13380c3dffbf0c | [
"Apache-2.0"
] | null | null | null | topicsketch/fast_hashing.py | zengjichuan/topicsketch | 87583224a8f8a01f9f3075197e13380c3dffbf0c | [
"Apache-2.0"
] | 1 | 2020-10-16T08:31:31.000Z | 2020-10-16T08:31:31.000Z | __author__ = 'Wei Xie'
__email__ = 'linegroup3@gmail.com'
__affiliation__ = 'Living Analytics Research Centre, Singapore Management University'
__website__ = 'http://mysmu.edu/phdis2012/wei.xie.2012'
import os
from ctypes import cdll
if os.name == 'posix':
hashBase = cdll.LoadLibrary('./c/mlh.so')
if os.name == 'nt':
hashBase = cdll.LoadLibrary('./c/mlh.dll')
HASH_NUMBER = 5
hashBase.initialize(HASH_NUMBER)
def hash_code(txt):
txt = txt.encode('ascii', 'xmlcharrefreplace')
l = len(txt)
l = min(32, l)
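    # Only the first 32 bytes of text are hashed; one code per hash function.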
    ret = [hashBase.hash(txt, l, h) for h in range(HASH_NUMBER)]  # range works on Python 2 and 3
return ret
| 21.275862 | 85 | 0.687196 | 86 | 617 | 4.697674 | 0.651163 | 0.074257 | 0.039604 | 0.118812 | 0.133663 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023438 | 0.170178 | 617 | 28 | 86 | 22.035714 | 0.765625 | 0 | 0 | 0 | 0 | 0 | 0.293831 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
632ce9d6446a546b1d57923692c44c5e8a75c68c | 787 | py | Python | src/features/util.py | Nazdarovja/Python_Semester_Project | de830060553c51389c3940e022ae038b41021455 | [
"MIT"
] | null | null | null | src/features/util.py | Nazdarovja/Python_Semester_Project | de830060553c51389c3940e022ae038b41021455 | [
"MIT"
] | null | null | null | src/features/util.py | Nazdarovja/Python_Semester_Project | de830060553c51389c3940e022ae038b41021455 | [
"MIT"
] | null | null | null | import pandas as pd
import os
def normalize(df, new_col_name, col_to_norm):
'''
ref: https://en.wikipedia.org/wiki/Normalization_(statistics)
'''
    # Pre-computed dataset extrema for each supported column (renamed from
    # max/min to avoid shadowing the built-ins).
    if col_to_norm == 'word_count':
        col_max, col_min = 1718, 74
    elif col_to_norm == 'avg_word_len':
        col_max, col_min = 0.0010981580557913647, 1.431355415135597e-06
    else:
        raise ValueError('Unsupported column: {}'.format(col_to_norm))
    df[new_col_name] = df[col_to_norm].apply(
        lambda val: (val - col_min) / (col_max - col_min))
return df
def create_pickle(df, file_name):
"""
Parameters
----------
df : pandas.DataFrame
genre/lyrics dataframe
file_name: str
file name to create
"""
file_path = os.path.join('data','processed',file_name)
if os.path.isfile(file_path):
os.remove(file_path)
df.to_pickle(file_path) | 22.485714 | 77 | 0.612452 | 108 | 787 | 4.231481 | 0.5 | 0.043764 | 0.078775 | 0.052516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075085 | 0.2554 | 787 | 35 | 78 | 22.485714 | 0.704778 | 0.217281 | 0 | 0 | 0 | 0 | 0.061837 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63306b75f0ff30d8e53db3fba7c56d2f4d97a88c | 507 | py | Python | pygis/path.py | giswqs/pygis | 28b0f20fbbd35ad7b1758bfda4e4b959eb89a915 | [
"MIT"
] | 16 | 2018-11-10T05:51:59.000Z | 2022-03-14T14:36:38.000Z | pygis/path.py | giswqs/pygis | 28b0f20fbbd35ad7b1758bfda4e4b959eb89a915 | [
"MIT"
] | 1 | 2020-08-13T13:19:40.000Z | 2020-08-13T13:36:24.000Z | pygis/path.py | giswqs/pygis | 28b0f20fbbd35ad7b1758bfda4e4b959eb89a915 | [
"MIT"
] | 4 | 2019-04-18T04:21:37.000Z | 2021-10-05T13:29:40.000Z | import os
import shutil
def mkdir(dir_name, root_dir='.'):
temp_dir = os.path.join(os.path.expanduser(root_dir), dir_name)
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
print("Folder created: {}".format(temp_dir))
return temp_dir
def rmdir(dir_name, root_dir='.'):
temp_dir = os.path.join(os.path.expanduser(root_dir), dir_name)
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
if __name__ == "__main__":
mkdir('temp2', '~')
rmdir('temp2', '~') | 25.35 | 67 | 0.658777 | 77 | 507 | 4.025974 | 0.311688 | 0.180645 | 0.087097 | 0.090323 | 0.529032 | 0.406452 | 0.406452 | 0.406452 | 0.406452 | 0.406452 | 0 | 0.004796 | 0.177515 | 507 | 20 | 68 | 25.35 | 0.738609 | 0 | 0 | 0.133333 | 0 | 0 | 0.07874 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.333333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63317ffdf2a9daa41013957ccbd89119ec086b67 | 15,106 | py | Python | mycarehub/common/models/common_models.py | savannahghi/mycarehub-backend | 035471bf463d99ae7247cc25adc7062de681d9ea | [
"MIT"
] | 1 | 2022-01-06T12:20:15.000Z | 2022-01-06T12:20:15.000Z | mycarehub/common/models/common_models.py | savannahghi/mycarehub-backend | 035471bf463d99ae7247cc25adc7062de681d9ea | [
"MIT"
] | 5 | 2021-11-07T11:01:13.000Z | 2022-03-29T08:33:28.000Z | mycarehub/common/models/common_models.py | savannahghi/mycarehub-backend | 035471bf463d99ae7247cc25adc7062de681d9ea | [
"MIT"
] | 1 | 2022-01-17T10:56:42.000Z | 2022-01-17T10:56:42.000Z | from django.contrib.auth import get_user_model
from django.contrib.gis.db import models
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.db.models.fields.json import JSONField
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from ..constants import COUNTRY_CODES, WHITELIST_COUNTIES
from ..utils import get_constituencies, get_counties, get_sub_counties, get_wards
from .base_models import AbstractBase, AbstractBaseManager, AbstractBaseQuerySet, Attachment
User = get_user_model()
# =============================================================================
# QUERYSETS
# =============================================================================
class FacilityQuerySet(AbstractBaseQuerySet):
"""Queryset for the Facility model."""
def mycarehub_facilities(self):
"""Return all the facilities that are part of the FYJ program."""
return self.active().filter(
county__in=WHITELIST_COUNTIES,
)
# =============================================================================
# MANAGERS
# =============================================================================
class FacilityManager(AbstractBaseManager):
"""Manager for the UserFacilityAllotment model."""
def mycarehub_facilities(self):
"""Return all the facilities that are part of the FYJ program."""
return self.get_queryset().mycarehub_facilities()
def get_queryset(self):
return FacilityQuerySet(self.model, using=self.db)
# =============================================================================
# MODELS
# =============================================================================
class Facility(AbstractBase):
"""A facility with M&E reporting.
The data is fetched - and updated - from the Kenya Master Health Facilities List.
"""
name = models.TextField(unique=True)
description = models.TextField(blank=True, default="")
mfl_code = models.IntegerField(unique=True, help_text="MFL Code")
county = models.CharField(max_length=64, choices=get_counties())
phone = models.CharField(max_length=15, null=True, blank=True)
fhir_organization_id = models.CharField(unique=True, max_length=64, blank=True, null=True)
objects = FacilityManager()
model_validators = [
"check_facility_name_longer_than_three_characters",
]
def get_absolute_url(self):
update_url = reverse("common:facility_update", kwargs={"pk": self.pk})
return update_url
def check_facility_name_longer_than_three_characters(self):
if len(self.name) < 3:
raise ValidationError("the facility name should exceed 3 characters")
def __str__(self):
return f"{self.name} - {self.mfl_code} ({self.county})"
class Meta(AbstractBase.Meta):
verbose_name_plural = "facilities"
class FacilityAttachment(Attachment):
"""Any document attached to a facility."""
facility = models.ForeignKey(Facility, on_delete=models.PROTECT)
notes = models.TextField()
organisation_verify = ["facility"]
class Meta(AbstractBase.Meta):
"""Define ordering and other attributes for attachments."""
ordering = ("-updated", "-created")
class UserFacilityAllotment(AbstractBase):
"""Define the allocation of a facility/facilities to a user."""
class AllotmentType(models.TextChoices):
"""The type of facility allocation to a user."""
BY_FACILITY = "facility", "By Facility"
BY_REGION = "region", "By Region"
BY_FACILITY_AND_REGION = "both", "By Both Facility and Region"
class RegionType(models.TextChoices):
"""The type of region whose facilities are to be assigned user."""
COUNTY = "county"
CONSTITUENCY = "constituency"
SUB_COUNTY = "sub_county"
WARD = "ward"
user = models.OneToOneField(User, on_delete=models.PROTECT)
allotment_type = models.CharField(max_length=10, choices=AllotmentType.choices)
region_type = models.CharField(
max_length=20, choices=RegionType.choices, null=True, blank=True
)
facilities = models.ManyToManyField(Facility, blank=True)
counties = ArrayField(
models.CharField(max_length=150, choices=get_counties(), null=True, blank=True),
help_text=(
"All the facilities in the selected counties will be allocated to the selected user."
),
null=True,
blank=True,
)
constituencies = ArrayField(
models.CharField(max_length=150, choices=get_constituencies(), null=True, blank=True),
help_text=(
"All the facilities in the selected constituencies will be allocated to the selected "
"user."
),
null=True,
blank=True,
)
sub_counties = ArrayField(
models.CharField(max_length=150, choices=get_sub_counties(), null=True, blank=True),
help_text=(
"All the facilities in the selected sub counties will be allocated to the selected "
"user."
),
null=True,
blank=True,
)
wards = ArrayField(
models.CharField(max_length=150, choices=get_wards(), null=True, blank=True),
help_text=(
"All the facilities in the selected wards will be allocated to the selected user."
),
null=True,
blank=True,
)
model_validators = [
"check_region_type_is_provided_if_allot_by_region_or_both",
"check_county_is_provided_if_region_type_is_county",
"check_constituency_is_provided_if_region_type_is_constituency",
"check_sub_county_is_provided_if_region_type_is_sub_county",
"check_ward_is_provided_if_region_type_is_ward",
]
def check_region_type_is_provided_if_allot_by_region_or_both(self):
by_both = self.AllotmentType.BY_FACILITY_AND_REGION.value
by_region = self.AllotmentType.BY_REGION.value
if self.allotment_type in (by_both, by_region) and not self.region_type:
raise ValidationError(
{
"region_type": 'A region type must be provided if allotment type is "%s"'
% self.get_allotment_type_display() # noqa
},
code="required",
)
def check_county_is_provided_if_region_type_is_county(self):
by_both = self.AllotmentType.BY_FACILITY_AND_REGION.value
by_region = self.AllotmentType.BY_REGION.value
county = self.RegionType.COUNTY
if (
self.allotment_type in (by_both, by_region)
and self.region_type == county.value
and not self.counties
):
raise ValidationError(
{
"counties": 'At least 1 county must be selected if region type is "%s"'
% county.label
},
code="required",
)
def check_constituency_is_provided_if_region_type_is_constituency(self):
by_both = self.AllotmentType.BY_FACILITY_AND_REGION.value
by_region = self.AllotmentType.BY_REGION.value
constituency = self.RegionType.CONSTITUENCY
if (
self.allotment_type in (by_both, by_region)
and self.region_type == constituency.value
and not self.constituencies
):
raise ValidationError(
{
"constituencies": "At least 1 constituency must be selected if region type "
'is "%s"' % constituency.label
},
code="required",
)
def check_sub_county_is_provided_if_region_type_is_sub_county(self):
by_both = self.AllotmentType.BY_FACILITY_AND_REGION.value
by_region = self.AllotmentType.BY_REGION.value
sub_county = self.RegionType.SUB_COUNTY
if (
self.allotment_type in (by_both, by_region)
and self.region_type == sub_county.value
and not self.sub_counties
):
raise ValidationError(
{
"sub_counties": 'At least 1 sub_county must be selected if region type is "%s"'
% sub_county.label
},
code="required",
)
def check_ward_is_provided_if_region_type_is_ward(self):
by_both = self.AllotmentType.BY_FACILITY_AND_REGION.value
by_region = self.AllotmentType.BY_REGION.value
ward = self.RegionType.WARD
if (
self.allotment_type in (by_both, by_region)
and self.region_type == ward.value
and not self.wards
):
raise ValidationError(
{"wards": 'At least 1 ward must be selected if region type is "%s"' % ward.label},
code="required",
)
def get_absolute_url(self):
update_url = reverse("common:user_facility_allotment_update", kwargs={"pk": self.pk})
return update_url
def __str__(self):
return (
f"User: {self.user.name}; Allotment Type: {self.get_allotment_type_display()}" # noqa
)
@staticmethod
def get_facilities_for_user(user):
"""Return a queryset containing all the facilities allotted to the given user."""
allotment = UserFacilityAllotment.objects.filter(user=user).first()
if not allotment:
return Facility.objects.none()
return UserFacilityAllotment.get_facilities_for_allotment(allotment)
@staticmethod
def get_facilities_for_allotment(allotment: "UserFacilityAllotment"):
"""Return a queryset containing all the facilities specified in the given allotment."""
by_facility = UserFacilityAllotment.AllotmentType.BY_FACILITY.value
by_region = UserFacilityAllotment.AllotmentType.BY_REGION.value
by_facility_filter = UserFacilityAllotment._get_allot_by_facility_filter(allotment)
by_region_filter = UserFacilityAllotment._get_allot_by_region_filter(allotment)
facilities = Facility.objects.filter(organisation=allotment.organisation)
if allotment.allotment_type == by_facility:
return facilities.filter(**by_facility_filter)
if allotment.allotment_type == by_region:
return facilities.filter(**by_region_filter)
# for both facility and region
return facilities.filter(Q(**by_facility_filter) | Q(**by_region_filter))
@staticmethod
def _get_allot_by_facility_filter(allotment: "UserFacilityAllotment"):
"""Helper for generating a queryset filter."""
return {"pk__in": allotment.facilities.values_list("pk", flat=True)}
@staticmethod
def _get_allot_by_region_filter(allotment: "UserFacilityAllotment"):
"""Helper for generating a queryset filter."""
by_region_filter = {}
if allotment.region_type == UserFacilityAllotment.RegionType.COUNTY.value:
by_region_filter["county__in"] = allotment.counties
return by_region_filter
class Meta(AbstractBase.Meta):
"""Define ordering and other attributes for attachments."""
ordering = ("-updated", "-created")
class Address(AbstractBase):
class AddressType(models.TextChoices):
POSTAL = "POSTAL", _("Postal Address")
PHYSICAL = "PHYSICAL", _("Physical Address")
BOTH = "BOTH", _("Both physical and postal")
address_type = models.CharField(choices=AddressType.choices, max_length=16)
text = models.TextField()
postal_code = models.TextField()
country = models.CharField(max_length=255, choices=COUNTRY_CODES, default="KEN")
def __str__(self):
return f"{self.text} ({self.address_type})"
class Contact(AbstractBase):
class ContactType(models.TextChoices):
PHONE = "PHONE", _("PHONE")
EMAIL = "EMAIL", _("EMAIL")
class FlavourChoices(models.TextChoices):
PRO = "PRO", _("PRO")
CONSUMER = "CONSUMER", _("CONSUMER")
contact_type = models.CharField(choices=ContactType.choices, max_length=16)
contact_value = models.TextField()
opted_in = models.BooleanField(default=False)
flavour = models.CharField(
choices=FlavourChoices.choices, max_length=32, null=True, blank=True
)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)
model_validators = ["validate_if_contact_exists"]
def __str__(self):
return f"{self.contact_value} ({self.contact_type})"
class Meta:
unique_together = ["contact_value", "flavour"]
def validate_if_contact_exists(self):
        if (
            Contact.objects.filter(
                contact_value=self.contact_value,
                contact_type=self.contact_type,
                flavour=self.flavour,
            )
            # Exclude self so saving an existing record does not fail its own check.
            .exclude(pk=self.pk)
            .exists()
        ):
raise ValidationError(
_(
"Contact value %(contact_value)s of "
"type %(contact_type)s and flavour "
"%(flavour)s already exists"
),
params={
"contact_value": self.contact_value,
"contact_type": self.contact_type,
"flavour": self.flavour,
},
)
class AuditLog(AbstractBase):
"""
    AuditLog is used to record all sensitive changes,
    e.g.
- changing a client's treatment buddy
- changing a client's facility
- deactivating a client
- changing a client's assigned community health volunteer
Rules of thumb: is there a need to find out what/when/why something
    occurred? Is a mistake potentially serious? Is there potential for
fraud?
"""
timestamp = models.DateTimeField(default=timezone.now)
record_type = models.TextField()
notes = models.TextField()
payload = JSONField()
class FAQ(AbstractBase):
class FlavourChoices(models.TextChoices):
PRO = "PRO", _("PRO")
CONSUMER = "CONSUMER", _("CONSUMER")
title = models.TextField(unique=True)
description = models.TextField(unique=True, null=True, blank=True)
body = models.TextField(unique=True)
flavour = models.CharField(
choices=FlavourChoices.choices, max_length=32, null=True, blank=True
)
class Notification(AbstractBase):
class FlavourChoices(models.TextChoices):
PRO = "PRO", _("PRO")
CONSUMER = "CONSUMER", _("CONSUMER")
title = models.CharField(max_length=64)
body = models.TextField()
notification_type = models.CharField(max_length=32)
flavour = models.CharField(choices=FlavourChoices.choices, max_length=32)
user = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
facility = models.ForeignKey(Facility, null=True, blank=True, on_delete=models.CASCADE)
is_read = models.BooleanField(default=False)
def __str__(self) -> str:
return f"{self.notification_type} - {self.title}"
| 36.576271 | 99 | 0.639481 | 1,659 | 15,106 | 5.596745 | 0.154913 | 0.025848 | 0.022402 | 0.029295 | 0.472698 | 0.417555 | 0.382876 | 0.354658 | 0.330102 | 0.24405 | 0 | 0.003919 | 0.239905 | 15,106 | 412 | 100 | 36.665049 | 0.804738 | 0.116113 | 0 | 0.302406 | 0 | 0 | 0.147475 | 0.042774 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072165 | false | 0 | 0.041237 | 0.020619 | 0.398625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6331bb66a1482025be06155fa8c5eeff39dc3faf | 839 | py | Python | pyazo/core/tests/test_templatetags.py | fossabot/pyazo | 86084642446b739845fa55e7a180e715ae59cb6c | [
"MIT"
] | 4 | 2020-05-19T22:47:25.000Z | 2021-06-21T23:03:01.000Z | pyazo/core/tests/test_templatetags.py | fossabot/pyazo | 86084642446b739845fa55e7a180e715ae59cb6c | [
"MIT"
] | 16 | 2020-02-11T16:13:47.000Z | 2020-10-05T21:13:16.000Z | pyazo/core/tests/test_templatetags.py | fossabot/pyazo | 86084642446b739845fa55e7a180e715ae59cb6c | [
"MIT"
] | 1 | 2020-03-04T08:22:01.000Z | 2020-03-04T08:22:01.000Z | """test template tags commands"""
from django.shortcuts import reverse
from django.test import RequestFactory, TestCase
from pyazo.core.templatetags.pyazo import back
class TemplateTagTest(TestCase):
"""Test django template tags"""
def setUp(self):
super().setUp()
self.factory = RequestFactory()
def test_back(self):
"""Test back"""
initial_request = self.factory.get(reverse("index"))
self.assertEqual(back({"request": initial_request}), "")
get_back_request = self.factory.get(reverse("index") + "?back=external")
self.assertEqual(back({"request": get_back_request}), "external")
meta_request = self.factory.get(reverse("index"))
meta_request.META["HTTP_REFERER"] = "external"
self.assertEqual(back({"request": meta_request}), "external")
| 34.958333 | 80 | 0.673421 | 94 | 839 | 5.893617 | 0.329787 | 0.099278 | 0.097473 | 0.113718 | 0.301444 | 0.1787 | 0 | 0 | 0 | 0 | 0 | 0 | 0.18236 | 839 | 23 | 81 | 36.478261 | 0.80758 | 0.075089 | 0 | 0 | 0 | 0 | 0.113158 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6333b95989468dc79c379a2446d4b4fbcd5d697f | 9,410 | py | Python | nobrainer/intensity_transforms.py | kaczmarj/nobrainer | c1b17831a0e816d19ed79dbf620401f989f13bc2 | [
"Apache-2.0"
] | 17 | 2018-03-19T03:13:53.000Z | 2019-03-27T11:10:55.000Z | nobrainer/intensity_transforms.py | kaczmarj/nobrainer | c1b17831a0e816d19ed79dbf620401f989f13bc2 | [
"Apache-2.0"
] | 29 | 2018-02-08T14:49:06.000Z | 2019-03-19T21:03:58.000Z | nobrainer/intensity_transforms.py | kaczmarj/nobrainer | c1b17831a0e816d19ed79dbf620401f989f13bc2 | [
"Apache-2.0"
] | 12 | 2018-01-29T20:36:31.000Z | 2019-03-25T22:52:09.000Z | # TO DO def DivisiblePad(x,y= None, trans_xy= False, k):
import numpy as np
import tensorflow as tf
def addGaussianNoise(x, y=None, trans_xy=False, noise_mean=0.0, noise_std=0.1):
"""Add Gaussian noise to input and label.
Usage:
```python
>>> x = [[[1., 1., 1.]]]
>>> x_out = intensity_transforms.addGaussianNoise(x,
noise_mean=0.0, noise_std=1)
>>> x_out
<tf.Tensor: shape=(1, 1, 3), dtype=float32,
numpy=array([[[0.82689023, 1.9072294 , 1.9717102 ]]], dtype=float32)>
```
Parameters
----------
    x: input tensor or numpy array of rank 3,
    y: label tensor or numpy array of rank 3,
    noise_mean: float, mean of the Gaussian noise. Default=0.0;
    noise_std: float, standard deviation of the Gaussian noise. Default=0.1;
trans_xy: Boolean, transforms both x and y. If set True, function
will require both x,y.
Returns
----------
Input and/or label tensor with added Gaussian noise.
"""
    if not tf.is_tensor(x):
x = tf.convert_to_tensor(x)
x = tf.cast(x, tf.float32)
noise = tf.random.normal(x.shape, noise_mean, noise_std, dtype=x.dtype)
if trans_xy:
if y is None:
raise ValueError("`LabelMap' should be assigned")
if ~tf.is_tensor(y):
y = tf.convert_to_tensor(y)
if len(y.shape) != 3:
raise ValueError("`LabelMap` must be equal or higher than rank 2")
y = tf.cast(y, tf.float32)
return tf.math.add(x, noise), tf.math.add(y, noise)
else:
return tf.math.add(x, noise)
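# Illustrative sketch (not part of the original module): exercising the
# ``trans_xy=True`` path, in which the same noise tensor is added to both the
# input and the label. The shapes of ``x`` and ``y`` must match for this.
def _demo_add_gaussian_noise():
    x = np.ones((1, 1, 3), dtype=np.float32)
    y = np.zeros((1, 1, 3), dtype=np.float32)
    # Both outputs receive identical noise drawn with std 0.05.
    x_out, y_out = addGaussianNoise(x, y, trans_xy=True, noise_std=0.05)
    return x_out, y_out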
def minmaxIntensityScaling(x, y=None, trans_xy=False):
"""Apply intensity scaling [0-1] to input and label.
Usage:
```python
>>> x = [[[0., 2., 1.]]]
>>> x_out = intensity_transforms.minmaxIntensityScaling(x)
>>> x_out
<tf.Tensor: shape=(1, 1, 3), dtype=float32,
numpy=array([[[0., 1. , 0.5]]], dtype=float32)>
```
Parameters
----------
    x: input tensor or numpy array of rank 3,
    y: label tensor or numpy array of rank 3,
trans_xy: Boolean, transforms both x and y. If set True, function
will require both x,y.
Returns
----------
Input and/or label tensor with scaled intensity.
"""
    if not tf.is_tensor(x):
x = tf.convert_to_tensor(x)
x = tf.cast(x, tf.float32)
ep = tf.cast(
tf.convert_to_tensor(1e-8 * np.ones(x.shape).astype(np.float32)), tf.float32
)
xmin = tf.cast(tf.reduce_min(x), tf.float32)
xmax = tf.cast(tf.reduce_max(x), tf.float32)
x = tf.divide(tf.subtract(x, xmin), tf.add(tf.subtract(xmax, xmin), ep))
if trans_xy:
if y is None:
raise ValueError("`LabelMap' should be assigned")
if len(y.shape) != 3:
raise ValueError("`LabelMap` must be equal or higher than rank 2")
if ~tf.is_tensor(y):
y = tf.convert_to_tensor(y)
y = tf.cast(y, tf.float32)
ymin = tf.cast(tf.reduce_min(y), tf.float32)
ymax = tf.cast(tf.reduce_max(y), tf.float32)
y = tf.divide(tf.subtract(y, ymin), tf.add(tf.subtract(ymax, ymin), ep))
return x, y
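# Illustrative sketch (not part of the original module): when ``trans_xy`` is
# False the function still returns a 2-tuple, with ``y`` passed through
# unchanged (``None`` if it was not given).
def _demo_minmax_scaling():
    x = [[[0.0, 5.0, 10.0]]]
    x_out, y_out = minmaxIntensityScaling(x)  # y_out is None here
    return x_out, y_out  # x_out ~ [[[0., 0.5, 1.]]] up to the 1e-8 epsilon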
def customIntensityScaling(x, y=None, trans_xy=False, scale_x=[0.0, 1.0], scale_y=None):
"""Apply custom intensity scaling to input and label.
Usage:
```python
>>> x = [[[2., 2., 1.]]]
>>> y = [[[1., 0., 1.]]]
>>> x_out, y_out = intensity_transforms.customIntensityScaling(
x, y, trans_xy=True, scale_x=[0, 4], scale_y=[0, 3])
>>> x_out
<tf.Tensor: shape=(1, 1, 3), dtype=float32,
numpy=array([[[4., 4., 0.]]], dtype=float32)>
>>> y_out
<tf.Tensor: shape=(1, 1, 3), dtype=float32,
numpy=array([[[3., 0., 3.]]], dtype=float32)>
```
Parameters
----------
    x: input tensor or numpy array of rank 3,
    y: label tensor or numpy array of rank 3,
    trans_xy: Boolean, transforms both x and y (Default: False).
        If set True, function will require both x,y.
    scale_x: [minimum, maximum] target intensity range for x,
    scale_y: [minimum, maximum] target intensity range for y.
Returns
----------
Input and/or label tensor with custom scaled Intensity.
"""
x_norm, y_norm = minmaxIntensityScaling(x, y, trans_xy)
minx = tf.cast(
tf.convert_to_tensor(scale_x[0] * np.ones(x_norm.shape).astype(np.float32)),
tf.float32,
)
maxx = tf.cast(
tf.convert_to_tensor(scale_x[1] * np.ones(x_norm.shape).astype(np.float32)),
tf.float32,
)
diff_x = tf.subtract(maxx, minx)
x = tf.add(tf.multiply(x_norm, diff_x), minx)
if trans_xy:
if y is None:
raise ValueError("`LabelMap' should be assigned")
if scale_y is None:
raise ValueError("LabelMap scaling arguments as: scale_Y=[a,b]")
y = tf.cast(y, tf.float32)
miny = tf.cast(
tf.convert_to_tensor(scale_y[0] * np.ones(y_norm.shape).astype(np.float32)),
tf.float32,
)
maxy = tf.cast(
tf.convert_to_tensor(scale_y[1] * np.ones(y_norm.shape).astype(np.float32)),
tf.float32,
)
diff_y = tf.subtract(maxy, miny)
y = tf.add(tf.multiply(y_norm, diff_y), miny)
return x, y
else:
return x
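# Illustrative sketch (not part of the original module): rescaling only the
# input to a [0, 10] range; the label is left untouched because ``trans_xy``
# defaults to False, so only ``x`` is returned.
def _demo_custom_scaling():
    x = [[[1.0, 2.0, 3.0]]]
    x_out = customIntensityScaling(x, scale_x=[0.0, 10.0])
    return x_out  # ~ [[[0., 5., 10.]]] up to the internal epsilon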
def intensityMasking(x, mask_x, y=None, trans_xy=False, mask_y=None):
"""Masking the intensity values in input and label.
Usage:
```python
>>> mask_x = np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]]])
>>> x = np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]]])
>>> x_out = intensity_transforms.intensityMasking(x,
mask_x=mask_x)
>>> x_out
(<tf.Tensor: shape=(2, 3, 3), dtype=float32, numpy=
array([[[0., 0., 0.],
[0., 2., 0.],
[0., 0., 0.]], dtype=float32)>, None)
```
Parameters
----------
    x: input tensor or numpy array of rank 3,
    y: label tensor or numpy array of rank 3,
    mask_x: mask tensor or numpy array of the same shape as x,
    mask_y: mask tensor or numpy array of the same shape as y,
trans_xy: Boolean, transforms both x and y (Default: False).
If set True, function will require both x,y.
Returns
----------
Masked input and/or label tensor.
"""
    if not tf.is_tensor(x):
x = tf.convert_to_tensor(x)
x = tf.cast(x, tf.float32)
    if not tf.is_tensor(mask_x):
mask_x = tf.convert_to_tensor(mask_x)
mask_x = tf.cast(mask_x, tf.float32)
    if mask_x.shape != x.shape:
        raise ValueError("Mask shape should be the same as the input's")
x = tf.multiply(x, mask_x)
if trans_xy:
if y is None:
raise ValueError("`LabelMap' should be assigned")
if mask_y is None:
raise ValueError("Label Mask should not be none")
        if not tf.is_tensor(y):
            y = tf.convert_to_tensor(y)
        if not tf.is_tensor(mask_y):
            mask_y = tf.convert_to_tensor(mask_y)
        y = tf.cast(y, tf.float32)
        mask_y = tf.cast(mask_y, tf.float32)
        if mask_y.shape != y.shape:
            raise ValueError("Label mask shape should be the same as the label's")
return x, tf.multiply(y, mask_y)
else:
return x
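# Illustrative sketch (not part of the original module): masking both the
# input and the label with their own binary masks via ``trans_xy=True``.
def _demo_intensity_masking():
    x = np.array([[[1.0, 2.0], [3.0, 4.0]]])
    y = np.array([[[1.0, 0.0], [1.0, 0.0]]])
    keep = np.array([[[0.0, 1.0], [1.0, 0.0]]])  # zeros out the other voxels
    x_out, y_out = intensityMasking(x, mask_x=keep, y=y, trans_xy=True, mask_y=keep)
    return x_out, y_out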
def contrastAdjust(x, y=None, trans_xy=False, gamma=1.0):
"""Apply contrast adjustment to input and label.
Usage:
```python
>>> gamma = 1.5
>>> epsilon = 1e-7
>>> x = np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]]])
>>> x_out
(<tf.Tensor: shape=(1, 3, 3), dtype=float32, numpy=
array([[[1. , 1. , 1. ],
[1.7071067, 1.7071067, 1.7071067],
[3. , 3. , 3. ]]], dtype=float32)>, None)
```
Parameters
----------
    x: input tensor or numpy array of rank 3,
    y: label tensor or numpy array of rank 3,
    gamma: float, a contrast adjustment constant
trans_xy: Boolean, transforms both x and y (Default: False).
If set True, function will require both x,y.
Returns
----------
Input and/or label tensor with adjusted contrast.
"""
    if not tf.is_tensor(x):
x = tf.convert_to_tensor(x)
x = tf.cast(x, tf.float32)
ep = tf.cast(
tf.convert_to_tensor(1e-7 * np.ones(x.shape).astype(np.float32)), tf.float32
)
gamma = tf.cast(
tf.convert_to_tensor(gamma * np.ones(x.shape).astype(np.float32)), tf.float32
)
xmin = tf.cast(tf.reduce_min(x), tf.float32)
xmax = tf.cast(tf.reduce_max(x), tf.float32)
x_range = tf.subtract(xmax, xmin)
x = tf.pow(tf.divide(tf.subtract(x, xmin), tf.add(x_range, ep)), gamma)
x = tf.add(tf.multiply(x, x_range), xmin)
if trans_xy:
if y is None:
raise ValueError("`LabelMap' should be assigned")
if len(y.shape) != 3:
raise ValueError("`LabelMap` must be equal or higher than rank 2")
if ~tf.is_tensor(y):
y = tf.convert_to_tensor(y)
y = tf.cast(y, tf.float32)
ymin = tf.cast(tf.reduce_min(y), tf.float32)
ymax = tf.cast(tf.reduce_max(y), tf.float32)
y_range = tf.subtract(ymax, ymin)
y = tf.pow(tf.divide(tf.subtract(y, ymin), tf.add(y_range, ep)), gamma)
y = tf.add(tf.multiply(y, y_range), ymin)
return x, y
else:
return x
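# Illustrative sketch (not part of the original module): gamma > 1 darkens
# mid-range intensities and gamma < 1 brightens them, while the input's
# min/max are preserved because the result is rescaled to the original range.
def _demo_contrast_adjust():
    x = np.array([[[0.0, 0.5, 1.0], [0.0, 0.5, 1.0], [0.0, 0.5, 1.0]]])
    darker = contrastAdjust(x, gamma=2.0)    # mid value 0.5 -> ~0.25
    brighter = contrastAdjust(x, gamma=0.5)  # mid value 0.5 -> ~0.71
    return darker, brighter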
| 34.851852 | 88 | 0.569394 | 1,440 | 9,410 | 3.619444 | 0.096528 | 0.016117 | 0.035879 | 0.055449 | 0.742134 | 0.694743 | 0.586915 | 0.555065 | 0.509785 | 0.502878 | 0 | 0.041255 | 0.278746 | 9,410 | 269 | 89 | 34.981413 | 0.726683 | 0.395537 | 0 | 0.559055 | 0 | 0 | 0.082743 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03937 | false | 0 | 0.015748 | 0 | 0.125984 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |