Schema (113 columns, name | dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
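Every row below follows this schema, so a small sketch may help make it concrete. It assumes the dump is stored as a Parquet shard readable with pandas; the file name is hypothetical.

```python
import pandas as pd

# Hypothetical shard name; adjust to wherever this dump actually lives.
df = pd.read_parquet("data_0000.parquet")

print(df.shape)  # (n_rows, 113)
print(df[["hexsha", "size", "lang", "max_stars_count"]].head())

# The *_quality_signal columns support filtering, e.g. keeping files that
# parse as Python and are not dominated by duplicated 5-grams.
keep = (
    (df["qsc_codepython_cate_ast_quality_signal"] == 1.0)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
)
print(keep.sum(), "rows pass")
```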
---
Row 1
hexsha: 6a1f1b69ee306e65ab06cc8411c8b814a7455225 | size: 4,886 | ext: py | lang: Python
max_stars: mintproject/MINT-ModelCatalogIngestionAPI @ 026d3495483a3e48ea3c1364d0dda09beeea69e4, path server/openapi_server/controllers/data_transformation_controller.py, licenses ["Apache-2.0"], count 2, events 2019-05-30T21:33:43.000Z -> 2019-09-27T21:04:38.000Z
max_issues: mintproject/model-catalog-api @ 2ad7016691891497bba37afe8ceb0fea8fe769e5, same path, licenses ["Apache-2.0"], count 82, events 2019-10-08T16:35:34.000Z -> 2022-03-15T18:25:27.000Z
max_forks: mintproject/model-catalog-api @ 2ad7016691891497bba37afe8ceb0fea8fe769e5, same path, licenses ["Apache-2.0"], count null, events null -> null
content:
import connexion
import six
from openapi_server import query_manager
from openapi_server.utils.vars import DATATRANSFORMATION_TYPE_NAME, DATATRANSFORMATION_TYPE_URI
from openapi_server.models.data_transformation import DataTransformation # noqa: E501
from openapi_server import util
def custom_datasetspecifications_id_datatransformations_get(id, custom_query_name=None, username=None): # noqa: E501
"""Gets a list of data transformations related a dataset
Gets a list of data transformations related a dataset # noqa: E501
:param id: The ID of the dataspecification
:type id: str
:param custom_query_name: Name of the custom query
:type custom_query_name: str
:param username: Username to query
:type username: str
:rtype: List[DataTransformation]
"""
return query_manager.get_resource(id=id,
custom_query_name=custom_query_name,
username=username,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_get(username=None, label=None, page=None, per_page=None): # noqa: E501
"""List all instances of DataTransformation
Gets a list of all instances of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param username: Name of the user graph to query
:type username: str
:param label: Filter by label
:type label: str
:param page: Page number
:type page: int
:param per_page: Items per page
:type per_page: int
:rtype: List[DataTransformation]
"""
return query_manager.get_resource(
username=username,
label=label,
page=page,
per_page=per_page,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_delete(id, user=None): # noqa: E501
"""Delete an existing DataTransformation
Delete an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param user: Username
:type user: str
:rtype: None
"""
return query_manager.delete_resource(id=id,
user=user,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_get(id, username=None): # noqa: E501
"""Get a single DataTransformation by its id
Gets the details of a given DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param username: Name of the user graph to query
:type username: str
:rtype: DataTransformation
"""
return query_manager.get_resource(id=id,
username=username,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_id_put(id, user=None, data_transformation=None): # noqa: E501
"""Update an existing DataTransformation
Updates an existing DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param id: The ID of the DataTransformation to be retrieved
:type id: str
:param user: Username
:type user: str
    :param data_transformation: An old DataTransformation to be updated
:type data_transformation: dict | bytes
:rtype: DataTransformation
"""
if connexion.request.is_json:
data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.put_resource(id=id,
user=user,
body=data_transformation,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
def datatransformations_post(user=None, data_transformation=None): # noqa: E501
"""Create one DataTransformation
Create a new instance of DataTransformation (more information in https://w3id.org/okn/o/sd#DataTransformation) # noqa: E501
:param user: Username
:type user: str
    :param data_transformation: Information about the DataTransformation to be created
:type data_transformation: dict | bytes
:rtype: DataTransformation
"""
if connexion.request.is_json:
data_transformation = DataTransformation.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.post_resource(
user=user,
body=data_transformation,
rdf_type_uri=DATATRANSFORMATION_TYPE_URI,
rdf_type_name=DATATRANSFORMATION_TYPE_NAME,
kls=DataTransformation)
avg_line_length: 33.465753 | max_line_length: 134 | alphanum_fraction: 0.731068
qsc_code_*_quality_signal: num_words 603 | num_chars 4,886 | mean_word_length 5.736318 | frac_words_unique 0.157546 | frac_chars_top_2grams 0.034692 | frac_chars_top_3grams 0.052616 | frac_chars_top_4grams 0.060711 | frac_chars_dupe_5grams 0.679676 | frac_chars_dupe_6grams 0.670425 | frac_chars_dupe_7grams 0.662619 | frac_chars_dupe_8grams 0.640648 | frac_chars_dupe_9grams 0.58514 | frac_chars_dupe_10grams 0.539173 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.012765 | frac_chars_whitespace 0.198322 | size_file_byte 4,886 | num_lines 145 | num_chars_line_max 135 | num_chars_line_mean 33.696552 | frac_chars_alphabet 0.870309 | frac_chars_comments 0.458657 | cate_xml_start 0 | frac_lines_dupe_lines 0.615385 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.115385 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.115385 | frac_lines_simplefunc 0 | score_lines_no_logic 0.346154 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
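The qsc_code_* names read as plain text statistics over the `content` cell. Below is a minimal sketch of how the word-level signals could be computed; this is my reading of the column names, not the dataset's actual implementation.

```python
import re

def word_stats(content: str) -> dict:
    """Plausible reconstruction of three qsc_code_* word signals."""
    words = re.findall(r"\S+", content)  # whitespace-delimited tokens
    n = len(words)
    return {
        "num_words": n,
        "mean_word_length": sum(map(len, words)) / n if n else 0.0,
        "frac_words_unique": len(set(words)) / n if n else 0.0,
    }
```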
---
Row 2
hexsha: 6a1f4e62deeca6901732e02e6f44f1571b8f71c9 | size: 2,634 | ext: py | lang: Python
max_stars: NunoEdgarGFlowHub/shap @ 6992883fb3470163fcbe2bfacae0bd5f724ed1f8, path shap/plots/monitoring.py, licenses ["MIT"], count 8, events 2019-09-23T16:20:40.000Z -> 2021-10-09T20:26:20.000Z
max_issues: same repo, head, and path, licenses ["MIT"], count 1, events 2019-02-22T10:16:13.000Z -> 2019-02-22T10:16:13.000Z
max_forks: same repo, head, and path, licenses ["MIT"], count 4, events 2019-06-28T12:50:51.000Z -> 2021-07-02T07:42:18.000Z
content:
import numpy as np
import scipy
import warnings
try:
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import labels
from . import colors
def truncate_text(text, max_len):
if len(text) > max_len:
return text[:int(max_len/2)-2] + "..." + text[-int(max_len/2)+1:]
else:
return text
def monitoring_plot(ind, shap_values, features, feature_names=None):
""" Create a SHAP monitoring plot.
(Note this function is preliminary and subject to change!!)
A SHAP monitoring plot is meant to display the behavior of a model
over time. Often the shap_values given to this plot explain the loss
of a model, so changes in a feature's impact on the model's loss over
time can help in monitoring the model's performance.
Parameters
----------
ind : int
Index of the feature to plot.
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features)
feature_names : list
Names of the features (length # features)
"""
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
pl.figure(figsize=(10,3))
ys = shap_values[:,ind]
xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys))
pvals = []
inc = 50
for i in range(inc, len(ys)-inc, inc):
#stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative="two-sided")
stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:])
pvals.append(pval)
min_pval = np.min(pvals)
min_pval_ind = np.argmin(pvals)*inc + inc
if min_pval < 0.05 / shap_values.shape[1]:
pl.axvline(min_pval_ind, linestyle="dashed", color="#666666", alpha=0.2)
pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue)
pl.xlabel("Sample index")
pl.ylabel(truncate_text(feature_names[ind], 30) + "\nSHAP value", size=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
cb = pl.colorbar()
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
cb.set_label(truncate_text(feature_names[ind], 30), size=13)
pl.show()
avg_line_length: 33.769231 | max_line_length: 85 | alphanum_fraction: 0.648823
qsc_code_*_quality_signal: num_words 389 | num_chars 2,634 | mean_word_length 4.290488 | frac_words_unique 0.442159 | frac_chars_top_2grams 0.03595 | frac_chars_top_3grams 0.026962 | frac_chars_top_4grams 0.015578 | frac_chars_dupe_5grams 0.051528 | frac_chars_dupe_6grams 0.034751 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.019015 | frac_chars_whitespace 0.221336 | size_file_byte 2,634 | num_lines 78 | num_chars_line_max 86 | num_chars_line_mean 33.769231 | frac_chars_alphabet 0.794734 | frac_chars_comments 0.291192 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.066443 | frac_chars_long_word_length 0.01675 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.043478 | cate_var_zero false | frac_lines_pass 0.021739 | frac_lines_import 0.173913 | frac_lines_simplefunc 0 | score_lines_no_logic 0.26087 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
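A hypothetical call to the monitoring_plot defined above, with synthetic arrays shaped as the docstring asks (# samples x # features). The drift injected at sample 500 is invented so the t-test scan has something to find; it assumes the function and matplotlib are available in scope.

```python
import numpy as np

rng = np.random.default_rng(0)
shap_values = rng.normal(size=(1000, 5))
shap_values[500:, 2] += 1.0          # synthetic drift in feature 2
features = rng.normal(size=(1000, 5))
names = [f"feature_{i}" for i in range(5)]

# Should draw the scatter plot and a dashed line near index 500.
monitoring_plot(2, shap_values, features, feature_names=names)
```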
---
Row 3
hexsha: 6a2025301420406c02ae8d4c4fc4c88641b66f90 | size: 7,702 | ext: py | lang: Python
max_stars: nicolai-schwartze/Masterthesis @ 7857af20c6b233901ab3cedc325bd64704111e16, path code/testbed/pde1/FemPde1.py, licenses ["MIT"], count 1, events 2020-06-13T10:02:02.000Z -> 2020-06-13T10:02:02.000Z
max_issues: same repo, head, and path, licenses ["MIT"], count null, events null -> null
max_forks: same repo, head, and path, licenses ["MIT"], count null, events null -> null
content:
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 14:57:32 2020
@author: Nicolai
"""
import sys
import os
importpath = os.path.dirname(os.path.realpath(__file__)) + "/../"
sys.path.append(importpath)
from FemPdeBase import FemPdeBase
import numpy as np
# import from ngsolve
import ngsolve as ngs
from netgen.geom2d import unit_square
import time
import psutil
import gc
class FemPde1(FemPdeBase):
"""
**Implementation of PDE1 of the testbed:**
.. math::
- \Delta u(\mathbf{x}) = -2^{40}y^{10}(1-y)^{10}[90x^8(1-x)^{10}
- 200x^9(1-x)^9 + 90x^{10}(1-x)^8]
-2^{40}x^{10}(1-x)^{10}[90y^8(1-y)^{10}
- 200y^9(1-y)^9 + 90y^{10}(1-y)^8]
\Omega: \mathbf{x} \in [0,1]
u(\mathbf{x})|_{\partial \Omega} = 0
**with the solution:**
.. math::
u(\mathbf{x}) = 2^{40}x^{10}(1-x)^{10}y^{10}(1-y)^{10}
Attributes
----------
    max_ndof: int
the maximum number of degrees of freedom that can be created in the
adaptive mesh refinement, standard value is 50000
Methods
-------
solve()
solves the pde by calling ngsolve, provides: static condensation,
adaptive mesh refinement, parallelisation (where possible), sets the
internal variables for evaluating the exact solution and calculating
the distance between exact and approx solution
also sets execution time and memory consumption
Examples
--------
>>> import numpy as np
    >>> fempde1 = FemPde1(True)
    >>> pos = np.array([0.5, 0.5])
    >>> fempde1.exact(pos)
    >>> x -> numpy.ndarray with shape (2,)
    _mesh -> ngs.comp.Mesh
    _ngs_ex -> ngs.fem.CoefficientFunction
    -> try to call solve() first
    >>> fempde1.solve()
    >>> fempde1.exact(pos)
    1.0
    >>> fempde1.approx(pos)
    0.999998924259486
    >>> fempde1.normL2()
    5.853102150391562e-07
    >>> fempde1.exec_time
    3.830256175994873
    >>> fempde1.mem_consumption
76705792
"""
def __init__(self, show_gui, max_ndof=50000):
super().__init__(show_gui)
# init protected
self._pde_string = "-laplacian(u(x)) = -(2^40*y^10*(1-y)^10*(90*x^8*(1-x)^10 - 200*x^9*(1-x)^9 + 90*x^10*(1-x)^8)) -(2^40*x^10*(1-x)^10*(90*y^8*(1-y)^10 - 200*y^9*(1-y)^9 + 90*y^10*(1-y)^8))"
self._ngs_ex = (2**(4*10))*(ngs.x**10)*((1-ngs.x)**10)*(ngs.y**10)*((1-ngs.y)**10)
# init public
self.max_ndof = max_ndof
def solve(self):
# disable garbage collector
# --------------------------------------------------------------------#
gc.disable()
while(gc.isenabled()):
time.sleep(0.1)
# --------------------------------------------------------------------#
# measure how much memory is used until here
process = psutil.Process()
memstart = process.memory_info().vms
# starts timer
tstart = time.time()
if self.show_gui:
import netgen.gui
# create mesh with initial size 0.1
self._mesh = ngs.Mesh(unit_square.GenerateMesh(maxh=0.1))
#create finite element space
self._fes = ngs.H1(self._mesh, order=2, dirichlet=".*", autoupdate=True)
        # test and trial functions
u = self._fes.TrialFunction()
v = self._fes.TestFunction()
# create bilinear form and enable static condensation
self._a = ngs.BilinearForm(self._fes, condense=True)
self._a += ngs.grad(u)*ngs.grad(v)*ngs.dx
        # create linear functional and apply RHS
self._f = ngs.LinearForm(self._fes)
self._f += ( \
-(2**40*ngs.y**10*(1-ngs.y)**10*(90*ngs.x**8*(1-ngs.x)**10 - 200*ngs.x**9*(1-ngs.x)**9 + 90*ngs.x**10*(1-ngs.x)**8)) \
-(2**40*ngs.x**10*(1-ngs.x)**10*(90*ngs.y**8*(1-ngs.y)**10 - 200*ngs.y**9*(1-ngs.y)**9 + 90*ngs.y**10*(1-ngs.y)**8)) )*v*ngs.dx
        # preconditioner: multigrid - what prerequisites must the problem have?
self._c = ngs.Preconditioner(self._a,"multigrid")
# create grid function that holds the solution and set the boundary to 0
self._gfu = ngs.GridFunction(self._fes, autoupdate=True) # solution
self._g = 0.0
self._gfu.Set(self._g, definedon=self._mesh.Boundaries(".*"))
# draw grid function in gui
if self.show_gui:
ngs.Draw(self._gfu)
# create Hcurl space for flux calculation and estimate error
self._space_flux = ngs.HDiv(self._mesh, order=2, autoupdate=True)
self._gf_flux = ngs.GridFunction(self._space_flux, "flux", autoupdate=True)
        # TaskManager starts worker threads (the default thread count is the number of cores)
with ngs.TaskManager():
# this is the adaptive loop
while self._fes.ndof < self.max_ndof:
self._solveStep()
self._estimateError()
self._mesh.Refine()
# since the adaptive loop stopped with a mesh refinement, the gfu must be
# calculated one last time
self._solveStep()
if self.show_gui:
ngs.Draw(self._gfu)
        # set measured execution time
self._exec_time = time.time() - tstart
# set measured used memory
memstop = process.memory_info().vms - memstart
self._mem_consumption = memstop
# enable garbage collector
# --------------------------------------------------------------------#
gc.enable()
gc.collect()
# --------------------------------------------------------------------#
if __name__ == "__main__":
fempde1 = FemPde1(True)
print(fempde1.pde_string)
try:
fempde1.exact(np.array([0.5,0.5]))
except:
print("Î error message above")
try:
fempde1.approx(np.array([0.5,0.5]))
except:
print("Î error message above")
fempde1.solve()
print("-------------------------------------")
print("exact(0.5, 0.5) = {}".format(fempde1.exact(np.array([0.5,0.5]))))
print("approx(0.5, 0.5) = {}".format(fempde1.approx(np.array([0.5,0.5]))))
print("L2 norm to the real solution {}".format(fempde1.normL2()))
print("solving took {} sec".format(fempde1.exec_time))
print("solving uses {} Mb".format(fempde1.mem_consumption/1000000))
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(0, 1.01, 0.01)
X, Y = np.meshgrid(x, y)
zs0 = np.array([fempde1.exact(\
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
fig.tight_layout()
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0, X1)")
plt.show()
fig.savefig("sol_pde_1.pdf", bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(0, 1.01, 0.01)
X, Y = np.meshgrid(x, y)
zs0 = np.array([fempde1.approx(\
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
avg_line_length: 31.695473 | max_line_length: 199 | alphanum_fraction: 0.532849
qsc_code_*_quality_signal: num_words 1,030 | num_chars 7,702 | mean_word_length 3.88835 | frac_words_unique 0.296117 | frac_chars_top_2grams 0.011985 | frac_chars_top_3grams 0.006991 | frac_chars_top_4grams 0.006991 | frac_chars_dupe_5grams 0.210986 | frac_chars_dupe_6grams 0.205493 | frac_chars_dupe_7grams 0.19226 | frac_chars_dupe_8grams 0.174282 | frac_chars_dupe_9grams 0.137328 | frac_chars_dupe_10grams 0.137328 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.068829 | frac_chars_whitespace 0.279408 | size_file_byte 7,702 | num_lines 243 | num_chars_line_max 200 | num_chars_line_mean 31.695473 | frac_chars_alphabet 0.652793 | frac_chars_comments 0.343677 | cate_xml_start 0 | frac_lines_dupe_lines 0.336538 | cate_autogen 0 | frac_lines_long_string 0.009615 | frac_chars_string_length 0.090833 | frac_chars_long_word_length 0.023231 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.019231 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.144231 | frac_lines_simplefunc 0 | score_lines_no_logic 0.173077 | frac_lines_print 0.086538
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
---
Row 4
hexsha: 6a213e8a5b6a8886b1f3aeab6a75af090df46ca9 | size: 996 | ext: py | lang: Python
max_stars / max_issues / max_forks: gesuwen/Algorithms @ 0c9cf4412d76f8b69ef68cc80636323f5a0e5786, path LeetCode/530 Minimum Absolute Difference in BST.py, licenses ["MIT"], counts null, events null
content:
# Binary Search Tree
# Given a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.
#
# Example:
#
# Input:
#
# 1
# \
# 3
# /
# 2
#
# Output:
# 1
#
# Explanation:
# The minimum absolute difference is 1, which is the difference between 2 and 1 (or between 2 and 3).
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def getMinimumDifference(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.minDiff = []
def travel(node):
if not node:
return
self.minDiff.append(node.val)
            travel(node.left)
            travel(node.right)
travel(root)
self.minDiff = sorted(self.minDiff)
return min(abs(a - b) for a, b in zip(self.minDiff, self.minDiff[1:]))
avg_line_length: 23.162791 | max_line_length: 124 | alphanum_fraction: 0.566265
qsc_code_*_quality_signal: num_words 126 | num_chars 996 | mean_word_length 4.444444 | frac_words_unique 0.5 | frac_chars_top_2grams 0.117857 | frac_chars_top_3grams 0.057143 | frac_chars_top_4grams 0.1 | frac_chars_dupe_5grams 0 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.014925 | frac_chars_whitespace 0.327309 | size_file_byte 996 | num_lines 42 | num_chars_line_max 125 | num_chars_line_mean 23.714286 | frac_chars_alphabet 0.820896 | frac_chars_comments 0.49498 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.166667 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0 | frac_lines_simplefunc 0 | score_lines_no_logic 0.416667 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
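Wiring up the example from the problem statement above (root 1, right child 3, which has left child 2) against Solution. The TreeNode class follows the commented definition in the file; the rest is the problem's own example.

```python
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(1)
root.right = TreeNode(3)
root.right.left = TreeNode(2)

print(Solution().getMinimumDifference(root))  # 1
```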
---
Row 5
hexsha: 6a24c49a2e92d735c1970a4ba7a5a35023549f08 | size: 504 | ext: py | lang: Python
max_stars / max_issues / max_forks: luisornelasch/melp @ 82ff5c84d0df866ee64da10b96f61400c0809845, path app/database/database.py, licenses ["MIT"], counts null, events null
content:
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
SQLALCHEMY_DATABASE_URL = os.getenv("DATABASE_URL").replace("postgres://", "postgresql+psycopg2://")
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
avg_line_length: 24 | max_line_length: 100 | alphanum_fraction: 0.753968
qsc_code_*_quality_signal: num_words 59 | num_chars 504 | mean_word_length 6.271186 | frac_words_unique 0.508475 | frac_chars_top_2grams 0.113514 | frac_chars_top_3grams 0.113514 | frac_chars_top_4grams 0 | frac_chars_dupe_5grams 0 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.002326 | frac_chars_whitespace 0.146825 | size_file_byte 504 | num_lines 20 | num_chars_line_max 101 | num_chars_line_mean 25.2 | frac_chars_alphabet 0.85814 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.089286 | frac_chars_long_word_length 0.043651 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.071429 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.285714 | frac_lines_simplefunc 0 | score_lines_no_logic 0.357143 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
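get_db above is the usual FastAPI dependency pattern: one session per request, closed in the finally block. A sketch of how a route might consume it, assuming FastAPI is installed, DATABASE_URL is set before import, and the package layout implied by the path app/database/database.py; the route itself is hypothetical.

```python
from fastapi import Depends, FastAPI
from sqlalchemy.orm import Session

from app.database.database import get_db  # module shown above

app = FastAPI()

@app.get("/restaurants/{restaurant_id}")
def read_restaurant(restaurant_id: int, db: Session = Depends(get_db)):
    # `db` is a per-request session; get_db() closes it after the response.
    return {"restaurant_id": restaurant_id}
```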
---
Row 6
hexsha: 6a2905a1e278bec5cf1d153f6d2fadf970789157 | size: 2,657 | ext: py | lang: Python
max_stars: ozora-ogino/tflite-human-tracking @ d1be51c628e1464b5e2953a611df6e974a9ffbaa, path tests/test_utils.py, licenses ["MIT"], count 3, events 2021-12-20T00:43:28.000Z -> 2022-03-12T00:54:42.000Z
max_issues: same repo, head, and path, licenses ["MIT"], count null, events null -> null
max_forks: same repo, head, and path, licenses ["MIT"], count 5, events 2021-12-03T08:59:18.000Z -> 2022-03-17T11:25:38.000Z
content:
from src.utils import check_direction, direction_config, is_intersect
# pylint:disable=unexpected-keyword-arg
class TestCheckDirection:
def test_true(self):
"""Test true case."""
directions = {
"right": {"prev_center": [0, 0], "current_center": [20, 0], "expect": True},
"left": {"prev_center": [10, 0], "current_center": [0, 0], "expect": True},
"top": {"prev_center": [0, 10], "current_center": [0, 0], "expect": True},
"bottom": {"prev_center": [0, 0], "current_center": [0, 10], "expect": True},
}
for direction_str, args in directions.items():
expect = args.pop("expect")
result = check_direction(**args, direction=direction_config[direction_str])
assert result == expect
def test_false(self):
"""Test false case."""
directions = {
"right": {"prev_center": [0, 0], "current_center": [0, 0], "expect": False},
# This is right.
"left": {"prev_center": [0, 0], "current_center": [10, 0], "expect": False},
# This is bottom.
"top": {"prev_center": [0, 0], "current_center": [0, 10], "expect": False},
# This is top.
"bottom": {"prev_center": [0, 10], "current_center": [0, 0], "expect": False},
}
for direction_str, args in directions.items():
expect = args.pop("expect")
result = check_direction(**args, direction=direction_config[direction_str])
assert result == expect
def test_direction_none(self):
"""Check if always return true when direction is set None."""
args = [
{"prev_center": [0, 0], "current_center": [0, 0]}, # No movement.
{"prev_center": [0, 0], "current_center": [10, 0]}, # Right
{"prev_center": [10, 0], "current_center": [0, 0]}, # Left.
{"prev_center": [0, 10], "current_center": [0, 0]}, # Top.
{"prev_center": [0, 0], "current_center": [0, 10]}, # Bottom.
]
for arg in args:
# If the direction is None, always return True.
result = check_direction(**arg, direction=None)
assert result == True
class TestIsIntersect:
def test_true(self):
"""Test true case."""
args = {"A": [10, 0], "B": [10, 30], "C": [0, 10], "D": [30, 0]}
result = is_intersect(**args)
assert result == True
def test_false(self):
"""Test false case."""
args = {"A": [10, 0], "B": [10, 30], "C": [0, 10], "D": [0, 0]}
result = is_intersect(**args)
assert result == False
avg_line_length: 42.174603 | max_line_length: 90 | alphanum_fraction: 0.530297
qsc_code_*_quality_signal: num_words 315 | num_chars 2,657 | mean_word_length 4.326984 | frac_words_unique 0.184127 | frac_chars_top_2grams 0.10785 | frac_chars_top_3grams 0.088041 | frac_chars_top_4grams 0.070433 | frac_chars_dupe_5grams 0.705062 | frac_chars_dupe_6grams 0.688188 | frac_chars_dupe_7grams 0.666178 | frac_chars_dupe_8grams 0.55099 | frac_chars_dupe_9grams 0.426999 | frac_chars_dupe_10grams 0.237711 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.04661 | frac_chars_whitespace 0.289424 | size_file_byte 2,657 | num_lines 62 | num_chars_line_max 91 | num_chars_line_mean 42.854839 | frac_chars_alphabet 0.675318 | frac_chars_comments 0.108393 | cate_xml_start 0 | frac_lines_dupe_lines 0.409091 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.183805 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0.113636
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.113636 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.022727 | frac_lines_simplefunc 0 | score_lines_no_logic 0.181818 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
---
Row 7
hexsha: 6a29e328b66b3aa40c02b6c801e1beb3b20cffb7 | size: 1,470 | ext: py | lang: Python
max_stars: TinkoffCreditSystems/overhave @ b0ab705ef5c5c5a65fa0b14b173b64fd7310e187, path tests/unit/transport/s3/test_settings.py, licenses ["Apache-2.0"], count 33, events 2021-02-01T15:49:37.000Z -> 2021-12-20T00:44:43.000Z
max_issues: same repo, head, and path, licenses ["Apache-2.0"], count 46, events 2021-02-03T12:56:52.000Z -> 2021-12-19T18:50:27.000Z
max_forks: same repo, head, and path, licenses ["Apache-2.0"], count 1, events 2021-12-07T09:02:44.000Z -> 2021-12-07T09:02:44.000Z
content:
import pytest
from pydantic import ValidationError
from overhave.transport import OverhaveS3ManagerSettings
class TestS3ManagerSettings:
""" Unit tests for :class:`OverhaveS3ManagerSettings`. """
@pytest.mark.parametrize("test_s3_enabled", [False])
def test_disabled(self, test_s3_enabled: bool) -> None:
settings = OverhaveS3ManagerSettings(enabled=test_s3_enabled)
assert not settings.enabled
assert not settings.url
assert not settings.access_key
assert not settings.secret_key
@pytest.mark.parametrize("test_s3_enabled", [True])
def test_empty_enabled(self, test_s3_enabled: bool) -> None:
with pytest.raises(ValidationError):
OverhaveS3ManagerSettings(enabled=test_s3_enabled)
@pytest.mark.parametrize("test_s3_autocreate_buckets", [False, True], indirect=True)
@pytest.mark.parametrize("test_s3_enabled", [True], indirect=True)
def test_correct_enabled(
self,
test_s3_enabled: bool,
test_s3_autocreate_buckets: bool,
test_s3_manager_settings: OverhaveS3ManagerSettings,
) -> None:
assert test_s3_manager_settings.enabled == test_s3_enabled
assert test_s3_manager_settings.url
assert test_s3_manager_settings.access_key
assert test_s3_manager_settings.secret_key
assert test_s3_manager_settings.verify
assert test_s3_manager_settings.autocreate_buckets == test_s3_autocreate_buckets
avg_line_length: 39.72973 | max_line_length: 88 | alphanum_fraction: 0.742857
qsc_code_*_quality_signal: num_words 170 | num_chars 1,470 | mean_word_length 6.082353 | frac_words_unique 0.241176 | frac_chars_top_2grams 0.110251 | frac_chars_top_3grams 0.113153 | frac_chars_top_4grams 0.142166 | frac_chars_dupe_5grams 0.489362 | frac_chars_dupe_6grams 0.246615 | frac_chars_dupe_7grams 0.073501 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.020816 | frac_chars_whitespace 0.182993 | size_file_byte 1,470 | num_lines 36 | num_chars_line_max 89 | num_chars_line_mean 40.833333 | frac_chars_alphabet 0.840133 | frac_chars_comments 0.034014 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.050283 | frac_chars_long_word_length 0.018414 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0.344828
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.103448 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.103448 | frac_lines_simplefunc 0 | score_lines_no_logic 0.241379 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
---
Row 8
hexsha: 6a2a3c06f511758a8f808e719520fdb3ebac69cd | size: 15,015 | ext: py | lang: Python
max_stars: mark-nicholson/python-editline @ c23f1071c4b832a92f66e2f49142e5c5f00e500d, path examples/elCmd.py, licenses ["BSD-3-Clause"], count 4, events 2017-10-05T19:34:32.000Z -> 2021-05-18T23:29:44.000Z
max_issues: same repo, head, and path, licenses ["BSD-3-Clause"], count 2, events 2018-03-30T22:38:17.000Z -> 2018-03-30T22:39:13.000Z
max_forks: same repo, head, and path, licenses ["BSD-3-Clause"], count null, events null -> null
content:
"""A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is string we are matching
against, all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
"""
import string, sys
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class ElCmd:
"""A simple framework for writing line-oriented command interpreters.
These are often useful for test harnesses, administrative tools, and
prototypes that will later be wrapped in a more sophisticated interface.
A Cmd instance or subclass instance is a line-oriented interpreter
framework. There is no good reason to instantiate Cmd itself; rather,
it's useful as a superclass of an interpreter class you define yourself
in order to inherit Cmd's methods and encapsulate action methods.
"""
prompt = PROMPT
identchars = IDENTCHARS
ruler = '='
lastcmd = ''
intro = None
doc_leader = ""
doc_header = "Documented commands (type help <topic>):"
misc_header = "Miscellaneous help topics:"
undoc_header = "Undocumented commands:"
nohelp = "*** No help on %s"
use_rawinput = False
def __init__(self, completekey='tab', stdin=None, stdout=None):
"""Instantiate a line-oriented interpreter framework.
The optional argument 'completekey' is the readline name of a
completion key; it defaults to the Tab key. If completekey is
not None and the readline module is available, command completion
is done automatically. The optional arguments stdin and stdout
specify alternate input and output file objects; if not specified,
sys.stdin and sys.stdout are used.
"""
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
self.cmdqueue = []
self.completekey = completekey
if not self.use_rawinput and self.completekey:
try:
import editline
self.editline = editline.editline("CMD",
self.stdin, self.stdout, sys.stderr)
self.editline.rl_completer = self.complete
except ImportError:
print("Failed to import editline")
pass
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
self.preloop()
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
line = input(self.prompt)
except EOFError:
line = 'EOF'
else:
self.editline.prompt = self.prompt
line = self.editline.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
pass
def precmd(self, line):
"""Hook method executed just before the command line is
interpreted, but after the input prompt is generated and issued.
"""
return line
def postcmd(self, stop, line):
"""Hook method executed just after a command dispatch is finished."""
return stop
def preloop(self):
"""Hook method executed once when the cmdloop() method is called."""
pass
def postloop(self):
"""Hook method executed once when the cmdloop() method is about to
return.
"""
pass
def parseline(self, line):
"""Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
"""
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
return None, None, line
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
This may be overridden, but should not normally need to be;
see the precmd() and postcmd() methods for useful execution hooks.
The return value is a flag indicating whether interpretation of
commands by the interpreter should stop.
"""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if line == 'EOF' :
print("")
print("Bye")
sys.exit(0)
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
def emptyline(self):
"""Called when an empty line is entered in response to the prompt.
If this method is not overridden, it repeats the last nonempty
command entered.
"""
if self.lastcmd:
return self.onecmd(self.lastcmd)
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
If this method is not overridden, it prints an error message and
returns.
"""
self.stdout.write('*** Unknown syntax: %s (%d)\n' % (line,len(line)))
def completedefault(self, *ignored):
"""Method called to complete an input line when no command-specific
complete_*() method is available.
By default, it returns an empty list.
"""
return []
def completenames(self, text, *ignored):
dotext = 'do_'+text
return [a[3:] for a in self.get_names() if a.startswith(dotext)]
def complete(self, text, state):
"""Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
if state == 0:
origline = self.editline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = self.editline.get_begidx() - stripped
endidx = self.editline.get_endidx() - stripped
if begidx>0:
cmd, args, foo = self.parseline(line)
if cmd == '':
compfunc = self.completedefault
else:
try:
compfunc = getattr(self, 'complete_' + cmd)
except AttributeError:
compfunc = self.completedefault
else:
compfunc = self.completenames
self.completion_matches = compfunc(text, line, begidx, endidx)
try:
return self.completion_matches[state]
except IndexError:
return None
def get_names(self):
# This method used to pull in base class attributes
# at a time dir() didn't do it yet.
return dir(self.__class__)
def complete_help(self, *args):
commands = set(self.completenames(*args))
topics = set(a[5:] for a in self.get_names()
if a.startswith('help_' + args[0]))
return list(commands | topics)
def do_help(self, arg):
'List available commands with "help" or detailed help with "help cmd".'
if arg:
# XXX check arg syntax
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
return
func()
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
self.stdout.write("%s\n"%str(self.doc_leader))
self.print_topics(self.doc_header, cmds_doc, 15,80)
self.print_topics(self.misc_header, list(help.keys()),15,80)
self.print_topics(self.undoc_header, cmds_undoc, 15,80)
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
self.stdout.write("%s\n"%str(header))
if self.ruler:
self.stdout.write("%s\n"%str(self.ruler * len(header)))
self.columnize(cmds, maxcol-1)
self.stdout.write("\n")
def columnize(self, list, displaywidth=80):
"""Display a list of strings as a compact set of columns.
Each column is only as wide as necessary.
Columns are separated by two spaces (one was not legible enough).
"""
if not list:
self.stdout.write("<empty>\n")
return
nonstrings = [i for i in range(len(list))
if not isinstance(list[i], str)]
if nonstrings:
raise TypeError("list[i] not a string for i in %s"
% ", ".join(map(str, nonstrings)))
size = len(list)
if size == 1:
self.stdout.write('%s\n'%str(list[0]))
return
# Try every row count from 1 upwards
for nrows in range(1, len(list)):
ncols = (size+nrows-1) // nrows
colwidths = []
totwidth = -2
for col in range(ncols):
colwidth = 0
for row in range(nrows):
i = row + nrows*col
if i >= size:
break
x = list[i]
colwidth = max(colwidth, len(x))
colwidths.append(colwidth)
totwidth += colwidth + 2
if totwidth > displaywidth:
break
if totwidth <= displaywidth:
break
else:
nrows = len(list)
ncols = 1
colwidths = [0]
for row in range(nrows):
texts = []
for col in range(ncols):
i = row + nrows*col
if i >= size:
x = ""
else:
x = list[i]
texts.append(x)
while texts and not texts[-1]:
del texts[-1]
for col in range(len(texts)):
texts[col] = texts[col].ljust(colwidths[col])
self.stdout.write("%s\n"%str(" ".join(texts)))
class MyCmd(ElCmd,object):
def do_bleep(self, s):
print("bleep!")
def do_blob(self, s):
print("blob!")
def do_bob(self, s):
print("bob!")
def do_mods(self, s):
print(sys.modules.keys())
if __name__ == '__main__':
mc = MyCmd()
mc.cmdloop()
avg_line_length: 35.75 | max_line_length: 79 | alphanum_fraction: 0.556777
qsc_code_*_quality_signal: num_words 1,795 | num_chars 15,015 | mean_word_length 4.606685 | frac_words_unique 0.232869 | frac_chars_top_2grams 0.016931 | frac_chars_top_3grams 0.019954 | frac_chars_top_4grams 0.013545 | frac_chars_dupe_5grams 0.110291 | frac_chars_dupe_6grams 0.081509 | frac_chars_dupe_7grams 0.039424 | frac_chars_dupe_8grams 0.019108 | frac_chars_dupe_9grams 0.019108 | frac_chars_dupe_10grams 0.01161 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.005394 | frac_chars_whitespace 0.357909 | size_file_byte 15,015 | num_lines 419 | num_chars_line_max 80 | num_chars_line_mean 35.835322 | frac_chars_alphabet 0.852297 | frac_chars_comments 0.347985 | cate_xml_start 0 | frac_lines_dupe_lines 0.247104 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.044345 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.084942 | cate_var_zero false | frac_lines_pass 0.019305 | frac_lines_import 0.015444 | frac_lines_simplefunc 0.003861 | score_lines_no_logic 0.23166 | frac_lines_print 0.042471
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
---
Row 9
hexsha: 6a2bdc47419473e5c8f04286a711270211d71607 | size: 2,513 | ext: py | lang: Python
max_stars / max_issues / max_forks: jeremyCtown/data-structures-and-algorithms @ d4ba8741f858fb5298f8ce560240373fb7742e20, path data_structures/linked_lists/ll-kth-from-end/ll_kth.py, licenses ["MIT"], counts null, events null
content:
from node import Node
class LinkedList:
"""
initializes LL
"""
    def __init__(self, iter=()):
self.head = None
self._size = 0
for item in reversed(iter):
self.insert(item)
def __repr__(self):
"""
assumes head will have a val and we will need this
"""
return '<head> => {}'.format(self.head.val)
    def __str__(self):
        """ this is where we can see the list"""
        return '<head> => {}'.format(self.head.val if self.head else None)
def __len__(self):
"""
returns size of LL
"""
return self._size
def insert(self, val):
"""
basic insertion method for adding to front of LL
"""
self.head = Node(val, self.head)
self._size += 1
    def append(self, val):
        """
        appends node to the end of the LL
        """
        new_node = Node(val, None)
        if self.head is None:
            self.head = new_node
        else:
            current = self.head
            while current._next is not None:
                current = current._next
            current._next = new_node
        self._size += 1
        return new_node
def insert_before(self, val, new_val):
"""
inserts node before node at val
"""
new_node = Node(new_val)
        current = self.head
while current._next is not None:
if current._next.val == val:
new_node._next = current._next
current._next = new_node
self._size += 1
break
current = current._next
if current._next is None:
raise ValueError("Data not in list")
def insert_after(self, val, new_val):
"""
inserts node after node at val
"""
new_node = Node(new_val)
        current = self.head
while current._next is not None:
if current.val == val:
new_node._next = current._next._next
current._next = new_node
self._size += 1
break
current = current._next
if current._next is None:
raise ValueError("Data not in list")
def kth_from_end(self, k):
"""
returns node at kth from end
"""
if self._size - k < 0:
raise AttributeError
current = self.head
for i in range(self._size - k - 1):
current = current._next
return current
avg_line_length: 25.383838 | max_line_length: 58 | alphanum_fraction: 0.512933
qsc_code_*_quality_signal: num_words 299 | num_chars 2,513 | mean_word_length 4.076923 | frac_words_unique 0.237458 | frac_chars_top_2grams 0.153404 | frac_chars_top_3grams 0.053322 | frac_chars_top_4grams 0.04676 | frac_chars_dupe_5grams 0.418376 | frac_chars_dupe_6grams 0.418376 | frac_chars_dupe_7grams 0.378999 | frac_chars_dupe_8grams 0.336341 | frac_chars_dupe_9grams 0.336341 | frac_chars_dupe_10grams 0.336341 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.004633 | frac_chars_whitespace 0.398727 | size_file_byte 2,513 | num_lines 98 | num_chars_line_max 59 | num_chars_line_mean 25.642857 | frac_chars_alphabet 0.802118 | frac_chars_comments 0.116594 | cate_xml_start 0 | frac_lines_dupe_lines 0.410714 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.021611 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.160714 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.017857 | frac_lines_simplefunc 0 | score_lines_no_logic 0.267857 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
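A quick sanity check of kth_from_end, assuming node.py provides the Node(val, _next) constructor the class relies on; the module name ll_kth follows the file path above.

```python
from ll_kth import LinkedList

ll = LinkedList([1, 2, 3, 4, 5])   # __init__ inserts in reverse, so the
                                   # list reads 1 -> 2 -> 3 -> 4 -> 5
print(len(ll))                     # 5
print(ll.kth_from_end(0).val)      # 5 (last node)
print(ll.kth_from_end(1).val)      # 4
```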
---
Row 10
hexsha: 6a2be20f58b11b8306e1cbf1b9ec46cf140c201d | size: 1,559 | ext: py | lang: Python
max_stars: ckamtsikis/cmssw @ ea19fe642bb7537cbf58451dcf73aa5fd1b66250, path MuonAnalysis/MomentumScaleCalibration/test/LikelihoodPdfDBReader_cfg.py, licenses ["Apache-2.0"], count 852, events 2015-01-11T21:03:51.000Z -> 2022-03-25T21:14:00.000Z
max_issues: same repo, head, and path, licenses ["Apache-2.0"], count 30,371, events 2015-01-02T00:14:40.000Z -> 2022-03-31T23:26:05.000Z
max_forks: same repo, head, and path, licenses ["Apache-2.0"], count 3,240, events 2015-01-02T05:53:18.000Z -> 2022-03-31T17:24:21.000Z
content:
import FWCore.ParameterSet.Config as cms
process = cms.Process("LIKELIHOODPDFDBREADER")
# process.load("MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff")
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.CommonTopologies.globalTrackingGeometry_cfi")
process.load("RecoMuon.DetLayers.muonDetLayerGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("RecoMuon.TrackingTools.MuonServiceProxy_cff")
# process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring()
# )
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.poolDBESSource = cms.ESSource("PoolDBESSource",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
timetype = cms.untracked.string('runnumber'),
connect = cms.string('sqlite_file:dummy2.db'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('MuScleFitLikelihoodPdfRcd'),
tag = cms.string('MuScleFitLikelihoodPdf_2_1_12')
))
)
process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer(
"LikelihoodPdfDBReader"
)
process.p1 = cms.Path(process.LikelihoodPdfDBReaderModule)
avg_line_length: 30.568627 | max_line_length: 79 | alphanum_fraction: 0.76331
qsc_code_*_quality_signal: num_words 155 | num_chars 1,559 | mean_word_length 7.593548 | frac_words_unique 0.490323 | frac_chars_top_2grams 0.091759 | frac_chars_top_3grams 0.047579 | frac_chars_top_4grams 0.032285 | frac_chars_dupe_5grams 0.042481 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.014451 | frac_chars_whitespace 0.112251 | size_file_byte 1,559 | num_lines 50 | num_chars_line_max 80 | num_chars_line_mean 31.18 | frac_chars_alphabet 0.835983 | frac_chars_comments 0.099423 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.351216 | frac_chars_long_word_length 0.326896 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.03125 | frac_lines_simplefunc 0 | score_lines_no_logic 0.03125 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
---
Row 11
hexsha: 6a2f97206c5b9ec5564b46970658837924dcfae3 | size: 2,404 | ext: py | lang: Python
max_stars: augusto-herrmann/airflow @ 7ee4295dd3f7dba4fcd763286c7823bb1707fe99, path airflow/providers/microsoft/psrp/operators/psrp.py, licenses ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"], count 4, events 2021-06-26T13:37:35.000Z -> 2022-01-11T15:49:44.000Z
max_issues: same repo, head, path, and licenses, count 33, events 2021-07-25T10:29:30.000Z -> 2022-03-30T04:39:06.000Z
max_forks: same repo, head, path, and licenses, count null, events null -> null
content:
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import TYPE_CHECKING, List, Optional, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class PSRPOperator(BaseOperator):
"""PowerShell Remoting Protocol operator.
:param psrp_conn_id: connection id
:type psrp_conn_id: str
:param command: command to execute on remote host. (templated)
:type command: str
:param powershell: powershell to execute on remote host. (templated)
:type powershell: str
"""
template_fields: Sequence[str] = (
"command",
"powershell",
)
template_fields_renderers = {"command": "powershell", "powershell": "powershell"}
ui_color = "#901dd2"
def __init__(
self,
*,
psrp_conn_id: str,
command: Optional[str] = None,
powershell: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if not (command or powershell):
raise ValueError("Must provide either 'command' or 'powershell'")
self.conn_id = psrp_conn_id
self.command = command
self.powershell = powershell
def execute(self, context: "Context") -> List[str]:
with PSRPHook(self.conn_id) as hook:
ps = hook.invoke_powershell(
f"cmd.exe /c @'\n{self.command}\n'@" if self.command else self.powershell
)
if ps.had_errors:
raise AirflowException("Process failed")
return ps.output
avg_line_length: 34.342857 | max_line_length: 89 | alphanum_fraction: 0.687604
qsc_code_*_quality_signal: num_words 302 | num_chars 2,404 | mean_word_length 5.387417 | frac_words_unique 0.463576 | frac_chars_top_2grams 0.036878 | frac_chars_top_3grams 0.024585 | frac_chars_top_4grams 0.019668 | frac_chars_dupe_5grams 0.041795 | frac_chars_dupe_6grams 0.041795 | frac_chars_dupe_7grams 0.041795 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.004308 | frac_chars_whitespace 0.227537 | size_file_byte 2,404 | num_lines 69 | num_chars_line_max 90 | num_chars_line_mean 34.84058 | frac_chars_alphabet 0.871836 | frac_chars_comments 0.426373 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.120482 | frac_chars_long_word_length 0.016566 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.057143 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.142857 | frac_lines_simplefunc 0 | score_lines_no_logic 0.342857 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* duplicates: all 0; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
effective: 1 | hits: 0
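A hypothetical DAG snippet showing the operator above in use. The connection id and PowerShell command are invented for illustration; the import path follows the file's location in the provider package, and the DAG arguments assume an Airflow 2.x of the same era as the file.

```python
from datetime import datetime

from airflow import DAG
from airflow.providers.microsoft.psrp.operators.psrp import PSRPOperator

with DAG("psrp_example", start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
    disk_usage = PSRPOperator(
        task_id="disk_usage",
        psrp_conn_id="my_psrp_conn",                      # hypothetical connection
        powershell="Get-PSDrive -PSProvider FileSystem",  # runs on the remote host
    )
```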
---
Row 12
hexsha: 6a2fb0bff9f0be5443177122a457a61eac9dfba3 | size: 17,104 | ext: py | lang: Python
max_stars: mithro/chromium-infra @ d27ac0b230bedae4bc968515b02927cf9e17c2b7, path appengine/monorail/services/api_pb2_v1_helpers.py, licenses ["BSD-3-Clause"], count 1, events 2018-01-02T05:47:07.000Z -> 2018-01-02T05:47:07.000Z
max_issues: same repo, head, and path, licenses ["BSD-3-Clause"], count null, events null -> null
max_forks: same repo, head, and path, licenses ["BSD-3-Clause"], count null, events null -> null
content:
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Convert Monorail PB objects to API PB objects"""
import datetime
import logging
import time
from framework import framework_constants
from framework import framework_helpers
from framework import permissions
from framework import timestr
from proto import api_pb2_v1
from proto import project_pb2
from proto import tracker_pb2
from services import issue_svc
from services import project_svc
from services import user_svc
from tracker import tracker_bizobj
from tracker import tracker_helpers
def convert_project(project, config, role):
"""Convert Monorail Project PB to API ProjectWrapper PB."""
return api_pb2_v1.ProjectWrapper(
kind='monorail#project',
name=project.project_name,
externalId=project.project_name,
htmlLink='/p/%s/' % project.project_name,
summary=project.summary,
description=project.description,
role=role,
issuesConfig=convert_project_config(config))
def convert_project_config(config):
"""Convert Monorail ProjectIssueConfig PB to API ProjectIssueConfig PB."""
return api_pb2_v1.ProjectIssueConfig(
kind='monorail#projectIssueConfig',
restrictToKnown=config.restrict_to_known,
defaultColumns=config.default_col_spec.split(),
defaultSorting=config.default_sort_spec.split(),
statuses=[convert_status(s) for s in config.well_known_statuses],
labels=[convert_label(l) for l in config.well_known_labels],
prompts=[convert_template(t) for t in config.templates],
defaultPromptForMembers=config.default_template_for_developers,
defaultPromptForNonMembers=config.default_template_for_users)
def convert_status(status):
"""Convert Monorail StatusDef PB to API Status PB."""
return api_pb2_v1.Status(
status=status.status,
meansOpen=status.means_open,
description=status.status_docstring)
def convert_label(label):
"""Convert Monorail LabelDef PB to API Label PB."""
return api_pb2_v1.Label(
label=label.label,
description=label.label_docstring)
def convert_template(template):
"""Convert Monorail TemplateDef PB to API Prompt PB."""
return api_pb2_v1.Prompt(
name=template.name,
title=template.summary,
description=template.content,
titleMustBeEdited=template.summary_must_be_edited,
status=template.status,
labels=template.labels,
membersOnly=template.members_only,
defaultToMember=template.owner_defaults_to_member,
componentRequired=template.component_required)
def convert_person(user_id, cnxn, services, trap_exception=False):
"""Convert user id to API AtomPerson PB."""
if not user_id:
return None
try:
user = services.user.GetUser(cnxn, user_id)
except user_svc.NoSuchUserException as ex:
if trap_exception:
logging.warning(str(ex))
return None
else:
raise ex
days_ago = None
if user.last_visit_timestamp:
secs_ago = int(time.time()) - user.last_visit_timestamp
days_ago = secs_ago / framework_constants.SECS_PER_DAY
return api_pb2_v1.AtomPerson(
kind='monorail#issuePerson',
name=user.email,
htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(), user_id),
last_visit_days_ago=days_ago,
email_bouncing=bool(user.email_bounce_timestamp),
vacation_message=user.vacation_message)
def convert_issue_ids(issue_ids, mar, services):
"""Convert global issue ids to API IssueRef PB."""
# missed issue ids are filtered out.
issues = services.issue.GetIssues(mar.cnxn, issue_ids)
result = []
for issue in issues:
issue_ref = api_pb2_v1.IssueRef(
issueId=issue.local_id,
projectId=issue.project_name,
kind='monorail#issueRef')
result.append(issue_ref)
return result
def convert_issueref_pbs(issueref_pbs, mar, services):
"""Convert API IssueRef PBs to global issue ids."""
if issueref_pbs:
result = []
for ir in issueref_pbs:
project_id = mar.project_id
if ir.projectId:
project = services.project.GetProjectByName(
mar.cnxn, ir.projectId)
if project:
project_id = project.project_id
try:
issue = services.issue.GetIssueByLocalID(
mar.cnxn, project_id, ir.issueId)
result.append(issue.issue_id)
except issue_svc.NoSuchIssueException:
logging.warning(
'Issue (%s:%d) does not exist.' % (ir.projectId, ir.issueId))
return result
else:
return None
def convert_issue(cls, issue, mar, services):
"""Convert Monorail Issue PB to API IssuesGetInsertResponse."""
config = services.config.GetProjectConfig(mar.cnxn, issue.project_id)
granted_perms = tracker_bizobj.GetGrantedPerms(
issue, mar.auth.effective_ids, config)
issue_project = services.project.GetProject(mar.cnxn, issue.project_id)
component_list = []
for cd in config.component_defs:
cid = cd.component_id
if cid in issue.component_ids:
component_list.append(cd.path)
cc_list = [convert_person(p, mar.cnxn, services) for p in issue.cc_ids]
cc_list = [p for p in cc_list if p is not None]
field_values_list = []
field_id_dict = {
fd.field_id: fd.field_name for fd in config.field_defs}
for fv in issue.field_values:
field_name = field_id_dict.get(fv.field_id)
if not field_name:
logging.warning('Custom field %d of project %s does not exist',
fv.field_id, issue_project.project_name)
continue
val = None
if fv.user_id:
val = _get_user_email(
services.user, mar.cnxn, fv.user_id)
elif fv.str_value:
val = fv.str_value
elif fv.int_value:
val = str(fv.int_value)
new_fv = api_pb2_v1.FieldValue(
fieldName=field_name,
fieldValue=val,
derived=fv.derived)
field_values_list.append(new_fv)
resp = cls(
kind='monorail#issue',
id=issue.local_id,
title=issue.summary,
summary=issue.summary,
projectId=issue_project.project_name,
stars=issue.star_count,
starred=services.issue_star.IsItemStarredBy(
mar.cnxn, issue.issue_id, mar.auth.user_id),
status=issue.status,
state=(api_pb2_v1.IssueState.open if
tracker_helpers.MeansOpenInProject(
tracker_bizobj.GetStatus(issue), config)
else api_pb2_v1.IssueState.closed),
labels=issue.labels,
components=component_list,
author=convert_person(issue.reporter_id, mar.cnxn, services),
owner=convert_person(issue.owner_id, mar.cnxn, services),
cc=cc_list,
updated=datetime.datetime.fromtimestamp(issue.modified_timestamp),
published=datetime.datetime.fromtimestamp(issue.opened_timestamp),
blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services),
blocking=convert_issue_ids(issue.blocking_iids, mar, services),
canComment=permissions.CanCommentIssue(
mar.auth.effective_ids, mar.perms, issue_project, issue,
granted_perms=granted_perms),
canEdit=permissions.CanEditIssue(
mar.auth.effective_ids, mar.perms, issue_project, issue,
granted_perms=granted_perms),
fieldValues=field_values_list)
if issue.closed_timestamp > 0:
resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp)
if issue.merged_into:
    resp.mergedInto = convert_issue_ids([issue.merged_into], mar, services)[0]
if issue.owner_modified_timestamp:
resp.owner_modified = datetime.datetime.fromtimestamp(
issue.owner_modified_timestamp)
if issue.status_modified_timestamp:
resp.status_modified = datetime.datetime.fromtimestamp(
issue.status_modified_timestamp)
if issue.component_modified_timestamp:
resp.component_modified = datetime.datetime.fromtimestamp(
issue.component_modified_timestamp)
return resp
def convert_comment(issue, comment, mar, services, granted_perms):
"""Convert Monorail IssueComment PB to API IssueCommentWrapper."""
can_delete = permissions.CanDelete(
mar.auth.user_id, mar.auth.effective_ids, mar.perms,
comment.deleted_by, comment.user_id, mar.project,
permissions.GetRestrictions(issue), granted_perms=granted_perms)
return api_pb2_v1.IssueCommentWrapper(
attachments=[convert_attachment(a) for a in comment.attachments],
author=convert_person(comment.user_id, mar.cnxn, services,
trap_exception=True),
canDelete=can_delete,
content=comment.content,
deletedBy=convert_person(comment.deleted_by, mar.cnxn, services,
trap_exception=True),
id=comment.sequence,
published=datetime.datetime.fromtimestamp(comment.timestamp),
updates=convert_amendments(issue, comment.amendments, mar, services),
kind='monorail#issueComment')
def convert_attachment(attachment):
"""Convert Monorail Attachment PB to API Attachment."""
return api_pb2_v1.Attachment(
attachmentId=attachment.attachment_id,
fileName=attachment.filename,
fileSize=attachment.filesize,
mimetype=attachment.mimetype,
isDeleted=attachment.deleted)
def convert_amendments(issue, amendments, mar, services):
"""Convert a list of Monorail Amendment PBs to API Update."""
result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate')
for amendment in amendments:
if amendment.field == tracker_pb2.FieldID.SUMMARY:
result.summary = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.STATUS:
result.status = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.OWNER:
if len(amendment.added_user_ids) == 0:
result.owner = framework_constants.NO_USER_NAME
else:
result.owner = _get_user_email(
services.user, mar.cnxn, amendment.added_user_ids[0])
elif amendment.field == tracker_pb2.FieldID.LABELS:
result.labels = amendment.newvalue.split()
elif amendment.field == tracker_pb2.FieldID.CC:
for user_id in amendment.added_user_ids:
user_email = _get_user_email(
services.user, mar.cnxn, user_id)
result.cc.append(user_email)
for user_id in amendment.removed_user_ids:
user_email = _get_user_email(
services.user, mar.cnxn, user_id)
result.cc.append('-%s' % user_email)
elif amendment.field == tracker_pb2.FieldID.BLOCKEDON:
result.blockedOn = _append_project(
amendment.newvalue, issue.project_name)
elif amendment.field == tracker_pb2.FieldID.BLOCKING:
result.blocking = _append_project(
amendment.newvalue, issue.project_name)
elif amendment.field == tracker_pb2.FieldID.MERGEDINTO:
result.mergedInto = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.COMPONENTS:
result.components = amendment.newvalue.split()
elif amendment.field == tracker_pb2.FieldID.CUSTOM:
fv = api_pb2_v1.FieldValue()
fv.fieldName = amendment.custom_field_name
fv.fieldValue = amendment.newvalue
result.fieldValues.append(fv)
return result
def _get_user_email(user_service, cnxn, user_id):
"""Get user email."""
try:
user_email = user_service.LookupUserEmail(
cnxn, user_id)
if not user_email:
user_email = framework_constants.DELETED_USER_NAME
except user_svc.NoSuchUserException:
user_email = framework_constants.DELETED_USER_NAME
return user_email
def _append_project(issue_ids, project_name):
"""Append project name to convert <id> to <project>:<id> format."""
result = []
id_list = issue_ids.split()
for id_str in id_list:
if ':' in id_str:
result.append(id_str)
# '-' means this issue is being removed
elif id_str.startswith('-'):
result.append('-%s:%s' % (project_name, id_str[1:]))
else:
result.append('%s:%s' % (project_name, id_str))
return result
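# Illustrative example (not part of the original module; project names are
# hypothetical): _append_project('1 -2 webrtc:3', 'chromium') returns
# ['chromium:1', '-chromium:2', 'webrtc:3'].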
def split_remove_add(item_list):
"""Split one list of items into two: items to add and items to remove."""
list_to_add = []
list_to_remove = []
for item in item_list:
if item.startswith('-'):
list_to_remove.append(item[1:])
else:
list_to_add.append(item)
return list_to_add, list_to_remove
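# Illustrative example (not in the original source): items prefixed with '-'
# are treated as removals, so split_remove_add(['Hot', '-Cold', 'Pri-1'])
# returns (['Hot', 'Pri-1'], ['Cold']).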
# TODO(sheyang): batch the SQL queries to fetch projects/issues.
def issue_global_ids(project_local_id_pairs, project_id, mar, services):
"""Find global issues ids given <project_name>:<issue_local_id> pairs."""
result = []
for pair in project_local_id_pairs:
issue_project_id = None
local_id = None
if ':' in pair:
pair_ary = pair.split(':')
project_name = pair_ary[0]
local_id = int(pair_ary[1])
project = services.project.GetProjectByName(mar.cnxn, project_name)
if not project:
raise project_svc.NoSuchProjectException(
'Project %s does not exist' % project_name)
issue_project_id = project.project_id
else:
issue_project_id = project_id
local_id = int(pair)
result.append(
services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id))
return result
def convert_group_settings(group_name, setting):
"""Convert UserGroupSettings to UserGroupSettingsWrapper."""
return api_pb2_v1.UserGroupSettingsWrapper(
groupName=group_name,
who_can_view_members=setting.who_can_view_members,
ext_group_type=setting.ext_group_type,
last_sync_time=setting.last_sync_time)
def convert_component_def(cd, mar, services):
"""Convert ComponentDef PB to Component PB."""
project_name = services.project.LookupProjectNames(
mar.cnxn, [cd.project_id])[cd.project_id]
user_ids = set()
user_ids.update(
cd.admin_ids + cd.cc_ids + [cd.creator_id] + [cd.modifier_id])
user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids))
component = api_pb2_v1.Component(
componentId=cd.component_id,
projectName=project_name,
componentPath=cd.path,
description=cd.docstring,
admin=sorted([user_names_dict[uid] for uid in cd.admin_ids]),
cc=sorted([user_names_dict[uid] for uid in cd.cc_ids]),
deprecated=cd.deprecated)
if cd.created:
component.created = datetime.datetime.fromtimestamp(cd.created)
component.creator = user_names_dict[cd.creator_id]
if cd.modified:
component.modified = datetime.datetime.fromtimestamp(cd.modified)
component.modifier = user_names_dict[cd.modifier_id]
return component
def convert_component_ids(config, component_names):
"""Convert a list of component names to ids."""
component_names_lower = [name.lower() for name in component_names]
result = []
for cd in config.component_defs:
cpath = cd.path
if cpath.lower() in component_names_lower:
result.append(cd.component_id)
return result
def convert_field_values(field_values, mar, services):
"""Convert user passed in field value list to FieldValue PB, or labels."""
fv_list_add = []
fv_list_remove = []
fv_list_clear = []
label_list_add = []
label_list_remove = []
field_name_dict = {
fd.field_name: fd for fd in mar.config.field_defs}
for fv in field_values:
field_def = field_name_dict.get(fv.fieldName)
if not field_def:
      logging.warning('Custom field %s does not exist', fv.fieldName)
continue
if fv.operator == api_pb2_v1.FieldValueOperator.clear:
fv_list_clear.append(field_def.field_id)
continue
# Enum fields are stored as labels
if field_def.field_type == tracker_pb2.FieldTypes.ENUM_TYPE:
raw_val = '%s-%s' % (fv.fieldName, fv.fieldValue)
if fv.operator == api_pb2_v1.FieldValueOperator.remove:
label_list_remove.append(raw_val)
elif fv.operator == api_pb2_v1.FieldValueOperator.add:
label_list_add.append(raw_val)
else:
        logging.warning('Unsupported field value operator %s', fv.operator)
else:
new_fv = tracker_pb2.FieldValue(
field_id=field_def.field_id)
if field_def.field_type == tracker_pb2.FieldTypes.USER_TYPE:
try:
new_fv.user_id = services.user.LookupUserID(mar.cnxn, fv.fieldValue)
except user_svc.NoSuchUserException:
new_fv.user_id = 0
elif field_def.field_type == tracker_pb2.FieldTypes.STR_TYPE:
new_fv.str_value = fv.fieldValue
elif field_def.field_type == tracker_pb2.FieldTypes.INT_TYPE:
new_fv.int_value = int(fv.fieldValue)
else:
logging.warning(
'Unsupported field value type %s', field_def.field_type)
if fv.operator == api_pb2_v1.FieldValueOperator.remove:
fv_list_remove.append(new_fv)
elif fv.operator == api_pb2_v1.FieldValueOperator.add:
fv_list_add.append(new_fv)
else:
        logging.warning('Unsupported field value operator %s', fv.operator)
return (fv_list_add, fv_list_remove, fv_list_clear,
label_list_add, label_list_remove)
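# Sketch of a typical call site (hypothetical; the request object name is an
# assumption). Callers unpack the 5-tuple returned above:
#   (fv_add, fv_remove, fv_clear,
#    label_add, label_remove) = convert_field_values(
#        request_field_values, mar, services)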
[condensed per-file quality signals: avg line length 35.49 | max line length 78 | alphanumeric fraction 0.716 | 2,230 words | 17,104 chars | 481 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a2fe9fc55d86e49bc69dd057bc5f300e14dbe22 | size: 10,782 | ext: py | lang: Python
path: excut/feedback/rulebased_deduction/deduction_engine_extended.py | repo: mhmgad/ExCut @ 09e943a23207381de3c3a9e6f70015882b8ec4af | licenses: ["Apache-2.0"]
stars: 5 (2020-11-17T19:59:49.000Z to 2021-09-23T23:10:39.000Z) | issues: null | forks: null
"""
This module contains the rule-based inference (rulebased_deduction engine)
"""
import itertools
from collections import defaultdict
from itertools import chain
from excut.explanations_mining.descriptions import dump_explanations_to_file
from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file
from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended
from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure
from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended
from excut.kg.kg_indexing import Indexer
from excut.kg.utils.data_formating import n3_repr
from excut.utils.logging import logger
from excut.kg.utils.Constants import DEFUALT_AUX_RELATION
from excut.clustering import target_entities as tes
class Prediction:
"""
An object to represent the prediction of the rules
:ivar triple: the predicted triple
:ivar all_sources: all rules that predicted the same triple
"""
# def __init__(self, triple: tuple, source_description=Description(), all_sources=None):
def __init__(self, triple=None, sources=None):
self.triple = triple
        # self.source_description = source_description
self.all_sources = sources if sources else list() # sources if sources else {source_description}
def get_subject(self):
return self.triple[0]
def get_object(self):
return self.triple[2]
def get_quality(self, measure='x_coverage', method=max):
# return self.source_description.get_quality(measure)
return method([source.get_quality(measure) for source in self.all_sources])
def get_main_description(self, measure='x_coverage', method=max):
return method(self.all_sources, key=lambda d: d.get_quality(measure))
def __str__(self):
return str(self.triple) + '<<' + str(self.get_main_description())
def __repr__(self):
return "%s\t(\t%s,%s)" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources))
def __eq__(self, other):
return other.triple == self.triple
def __hash__(self):
return hash(self.triple)
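# A minimal sketch of the equality semantics above (illustrative, not part of
# the original module; rule_a/rule_b are hypothetical rule objects): two
# Prediction objects with the same triple compare equal and hash alike
# regardless of their sources, e.g.
#   p1 = Prediction(('e1', 'rel', 'c1'), [rule_a])
#   p2 = Prediction(('e1', 'rel', 'c1'), [rule_b])
#   assert p1 == p2 and hash(p1) == hash(p2)
# which is what lets equivalent predictions collapse in sets and dict keys.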
class DeductionEngine():
"""
Abstract rulebased_deduction/inference engine.
"""
def __init__(self, **kwargs):
pass
def infer(self, descriptions, recursive=False, topk=-1):
pass
class SparqlBasedDeductionEngineExtended(DeductionEngine):
"""
    Deduction engine that converts the rules to SPARQL queries and fires them over the KG.
    The deduction engine takes care of consolidating similar predictions.
"""
def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max):
"""
:param kg_query_interface: interface for the KG.
:param relation: the relation used in the predicted triple (optional)
:param quality: objective quality measure for ranking the predictions (optional) by default
the exclusive coverage of the rules is used
        :param quality_aggregation: the method used for aggregating the score if multiple rules infer the same fact
(optional) by default max is used.
"""
super(SparqlBasedDeductionEngineExtended, self).__init__()
self.relation = relation
self.query_executer = kg_query_interface
self.quality = quality
self.quality_aggregation = quality_aggregation
self.labels_indexer=Indexer(store=kg_query_interface.type,
endpoint=kg_query_interface.endpoint,
graph= kg_query_interface.labels_graph,
identifier=kg_query_interface.labels_identifier)
def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None,
clear_target_entities=True):
"""
        Infer new facts for a given set of descriptions
        :param descriptions_list: list of explanation/description rules
:param target_entities: entities and their labels for which predictions are generated
:param min_quality: minimum aggregated quality for the predictions
:param topk: k *distinct* highest quality predictions per entity,
:param output_filepath: predictions output file.
        :param clear_target_entities: clear the indexed target entities after inference is done
:return: dictionary of predicted entity-clusters assignments
"""
if isinstance(descriptions_list,dict):
descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values()))
if target_entities:
self.labels_indexer.index_triples(target_entities)
self.relation=target_entities.get_relation()
predictions = list(map(self._infer_single, descriptions_list))
per_entity_predictions = self.consolidate(predictions)
per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk)
if output_filepath:
dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True,
with_description=False, quality=self.quality)
if target_entities and clear_target_entities:
self.labels_indexer.drop()
return per_entity_predictions
def consolidate(self, predictions):
"""
Combine predictions from different rules
:param predictions: list of generated predictions
:return: combined single prediction with several sources for equivalent predictions
:rtype: dict
"""
# per_var_predictions = defaultdict(lambda: defaultdict(list))
# for p in chain.from_iterable(predictions):
# per_var_predictions[p.get_subject()][p.get_object()].append(p)
per_entity_predictions = defaultdict(lambda: defaultdict(Prediction))
for p in list(chain.from_iterable(predictions)):
cons_pred = per_entity_predictions[p.get_subject()][p.get_object()]
cons_pred.triple = p.triple
cons_pred.all_sources += p.all_sources
return per_entity_predictions
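    # Illustrative consequence of the merging above (a sketch, not original
    # code; rule_a/rule_b are hypothetical): if rule_a and rule_b both predict
    # ('e1', rel, 'c1'), consolidate() yields a single Prediction for that
    # triple whose all_sources list holds both rules, so get_quality() can
    # later aggregate over them with quality_aggregation.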
def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1):
"""
        Merge the inferred facts in case of functional predicates
:param per_entity_prediction:
:return:
"""
def quality_method(p):
return p.get_quality(self.quality, self.quality_aggregation)
per_entity_prediction_filtered = defaultdict(list)
for sub, per_obj_predictions in per_entity_prediction.items():
            # print([(k, p.triple[2], quality_method(p)) for k, p in per_obj_predictions.items()])
merged_predictions = list(
filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values())))
merged_predictions.sort(key=quality_method, reverse=True)
include = topk if topk > 0 else len(merged_predictions)
per_entity_prediction_filtered[sub] = merged_predictions[:include]
return per_entity_prediction_filtered
def _infer_single(self, description: Description2):
"""
Infer new facts for the given Description
:param description:
:return:
"""
bindings = self.query_executer.get_arguments_bindings(description,
restriction_pattern=Description2(body=[Atom('?x',
self.relation,
'?z')]))
head = description.head
# only supports p(?x,CONSTANT)
predictions = [Prediction((b, head.predicate, head.object), [description]) for b in bindings]
return predictions
def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True,
with_description=False, quality='x_coverage'):
"""
    Writes the predictions to two files: one human-readable, and one with a .parsable
    extension that can be parsed in Python.
:param per_var_predictions:
:param out_filepath:
:param triple_format:
:param topk:
:param with_weight:
:param with_description:
:return:
"""
out_file_parsable = out_filepath + '.parsable'
out_filepath_with_type = out_filepath + ('.%s' % quality if len(quality) > 0 else '')
with open(out_filepath_with_type, 'w') as out_file:
for var, predictions in per_var_predictions.items():
if topk > 0:
predictions = predictions[:topk]
for p in predictions:
if triple_format:
# I only output normalized_coverage
out_str = n3_repr(p.triple) + ('\t%f' % p.get_quality(quality) if with_weight else '') + (
'\t%s' % p.source_description if with_description else '')
else:
out_str = str(p)
out_file.write(out_str)
out_file.write('\n')
with open(out_file_parsable + ('.%s' % quality if len(quality) > 0 else ''), 'w') as out_file:
out_file.write('\n'.join(
map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else l, per_var_predictions.values())))))
return out_filepath_with_type
if __name__ == '__main__':
target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv')
vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql',
['http://yago-expr.org', 'http://yago-expr.org.types'],
labels_identifier='http://yago-expr.org.labels')
explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE})
explans=explainer.explain(target_entities,
output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt')
ded = SparqlBasedDeductionEngineExtended(vos_executer)
per_var_predictions = ded.infer(explans, target_entities,
output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv')
logger.info("Total variables with predictions subjects: %i", len(per_var_predictions))
[condensed per-file quality signals: avg line length 42.79 | max line length 147 | alphanumeric fraction 0.668 | 1,233 words | 10,782 chars | 251 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a3060eba97a54372d78e04129b03dceb1e1d40e | size: 916 | ext: py | lang: Python
path: dataloader/viperlist_train.py | repo: urasakikeisuke/rigidmask @ 4bb781102218dfd11efa767e2d0ba987d9949fd1 | licenses: ["MIT"]
stars: 138 (2021-01-12T03:02:04.000Z to 2022-03-30T07:14:15.000Z) | issues: 12 (2021-02-02T14:19:30.000Z to 2022-03-28T01:23:44.000Z) | forks: 14 (2021-01-13T01:31:34.000Z to 2022-01-30T14:48:06.000Z)
import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
import pdb
import glob
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def dataloader(filepath):
left_fold = 'image_2/'
train = glob.glob(filepath+left_fold+'/0*.jpg')
train = sorted(train)
l0_train = []
l1_train = []
flow_train = []
for img in train:
        # next consecutive frame: bump the zero-padded 5-digit frame index by one
        img1 = '%s_%05d.jpg' % (img.rsplit('_', 1)[0], 1 + int(img.split('.')[0].split('_')[-1]))
flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ')
if (img1 in train and len(glob.glob(flowp))>0 and ('01000' not in img)):
l0_train.append(img)
l1_train.append(img1)
flow_train.append(flowp)
return l0_train, l1_train, flow_train
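# Worked example of the pairing logic above (illustrative; the path is
# hypothetical): for img = '.../image_2/0001_00010.jpg', the next frame is
# img1 = '.../image_2/0001_00011.jpg' and the flow map is looked up at
# '.../flow_occ/0001_00010.png'; the triple is kept only if the next frame
# exists in the training list and the flow file is present on disk.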
[condensed per-file quality signals: avg line length 24.76 | max line length 97 | alphanumeric fraction 0.634 | 138 words | 916 chars | 36 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a328e84b47f7a5de237d63ba7bea1c7be663611 | size: 6,282 | ext: py | lang: Python
path: strava.py | repo: AartGoossens/streamlit-activity-viewer @ b43f157d8bee596908c4f2222be9bb0d8bd9b9e8 | licenses: ["MIT"]
stars: 4 (2021-05-21T11:34:00.000Z to 2022-02-17T18:22:06.000Z) | issues: null | forks: null
import base64
import os
import arrow
import httpx
import streamlit as st
import sweat
from bokeh.models.widgets import Div
APP_URL = os.environ["APP_URL"]
STRAVA_CLIENT_ID = os.environ["STRAVA_CLIENT_ID"]
STRAVA_CLIENT_SECRET = os.environ["STRAVA_CLIENT_SECRET"]
STRAVA_AUTHORIZATION_URL = "https://www.strava.com/oauth/authorize"
STRAVA_API_BASE_URL = "https://www.strava.com/api/v3"
DEFAULT_ACTIVITY_LABEL = "NO_ACTIVITY_SELECTED"
STRAVA_ORANGE = "#fc4c02"
@st.cache(show_spinner=False)
def load_image_as_base64(image_path):
with open(image_path, "rb") as f:
contents = f.read()
return base64.b64encode(contents).decode("utf-8")
def powered_by_strava_logo():
base64_image = load_image_as_base64("./static/api_logo_pwrdBy_strava_horiz_light.png")
st.markdown(
f'<img src="data:image/png;base64,{base64_image}" width="100%" alt="powered by strava">',
unsafe_allow_html=True,
)
def authorization_url():
request = httpx.Request(
method="GET",
url=STRAVA_AUTHORIZATION_URL,
params={
"client_id": STRAVA_CLIENT_ID,
"redirect_uri": APP_URL,
"response_type": "code",
"approval_prompt": "auto",
"scope": "activity:read_all"
}
)
return request.url
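# For illustration (assumed values, not real credentials): with
# STRAVA_CLIENT_ID = '12345' and APP_URL = 'https://example.com', the returned
# URL looks roughly like
#   https://www.strava.com/oauth/authorize?client_id=12345
#     &redirect_uri=https://example.com&response_type=code
#     &approval_prompt=auto&scope=activity:read_all
# (httpx handles the exact percent-encoding of the query string).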
def login_header(header=None):
strava_authorization_url = authorization_url()
if header is None:
base = st
else:
col1, _, _, button = header
base = button
with col1:
powered_by_strava_logo()
base64_image = load_image_as_base64("./static/btn_strava_connectwith_orange@2x.png")
base.markdown(
(
f"<a href=\"{strava_authorization_url}\">"
f" <img alt=\"strava login\" src=\"data:image/png;base64,{base64_image}\" width=\"100%\">"
f"</a>"
),
unsafe_allow_html=True,
)
def logout_header(header=None):
if header is None:
base = st
else:
_, col2, _, button = header
base = button
with col2:
powered_by_strava_logo()
if base.button("Log out"):
js = f"window.location.href = '{APP_URL}'"
html = f"<img src onerror=\"{js}\">"
div = Div(text=html)
st.bokeh_chart(div)
def logged_in_title(strava_auth, header=None):
if header is None:
base = st
else:
col, _, _, _ = header
base = col
first_name = strava_auth["athlete"]["firstname"]
last_name = strava_auth["athlete"]["lastname"]
col.markdown(f"*Welcome, {first_name} {last_name}!*")
@st.cache(show_spinner=False, suppress_st_warning=True)
def exchange_authorization_code(authorization_code):
response = httpx.post(
url="https://www.strava.com/oauth/token",
json={
"client_id": STRAVA_CLIENT_ID,
"client_secret": STRAVA_CLIENT_SECRET,
"code": authorization_code,
"grant_type": "authorization_code",
}
)
try:
response.raise_for_status()
except httpx.HTTPStatusError:
st.error("Something went wrong while authenticating with Strava. Please reload and try again")
st.experimental_set_query_params()
st.stop()
return
strava_auth = response.json()
return strava_auth
def authenticate(header=None, stop_if_unauthenticated=True):
query_params = st.experimental_get_query_params()
authorization_code = query_params.get("code", [None])[0]
if authorization_code is None:
authorization_code = query_params.get("session", [None])[0]
if authorization_code is None:
login_header(header=header)
if stop_if_unauthenticated:
st.stop()
return
else:
logout_header(header=header)
strava_auth = exchange_authorization_code(authorization_code)
logged_in_title(strava_auth, header)
st.experimental_set_query_params(session=authorization_code)
return strava_auth
def header():
col1, col2, col3 = st.beta_columns(3)
with col3:
strava_button = st.empty()
return col1, col2, col3, strava_button
@st.cache(show_spinner=False)
def get_activities(auth, page=1):
access_token = auth["access_token"]
response = httpx.get(
url=f"{STRAVA_API_BASE_URL}/athlete/activities",
params={
"page": page,
},
headers={
"Authorization": f"Bearer {access_token}",
},
)
return response.json()
def activity_label(activity):
if activity["name"] == DEFAULT_ACTIVITY_LABEL:
return ""
start_date = arrow.get(activity["start_date_local"])
human_readable_date = start_date.humanize(granularity=["day"])
date_string = start_date.format("YYYY-MM-DD")
return f"{activity['name']} - {date_string} ({human_readable_date})"
def select_strava_activity(auth):
col1, col2 = st.beta_columns([1, 3])
with col1:
page = st.number_input(
label="Activities page",
min_value=1,
help="The Strava API returns your activities in chunks of 30. Increment this field to go to the next page.",
)
with col2:
activities = get_activities(auth=auth, page=page)
if not activities:
st.info("This Strava account has no activities or you ran out of pages.")
st.stop()
default_activity = {"name": DEFAULT_ACTIVITY_LABEL, "start_date_local": ""}
activity = st.selectbox(
label="Select an activity",
options=[default_activity] + activities,
format_func=activity_label,
)
if activity["name"] == DEFAULT_ACTIVITY_LABEL:
st.write("No activity selected")
st.stop()
return
activity_url = f"https://www.strava.com/activities/{activity['id']}"
st.markdown(
f"<a href=\"{activity_url}\" style=\"color:{STRAVA_ORANGE};\">View on Strava</a>",
unsafe_allow_html=True
)
return activity
@st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True)
def download_activity(activity, strava_auth):
with st.spinner(f"Downloading activity \"{activity['name']}\"..."):
return sweat.read_strava(activity["id"], strava_auth["access_token"])
[condensed per-file quality signals: avg line length 27.55 | max line length 120 | alphanumeric fraction 0.641 | 773 words | 6,282 chars | 227 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a3376e3801e88076e88946747dfd57658118979 | size: 3,395 | ext: py | lang: Python
path: appliance/src/ufw_interface.py | repo: reap3r/nmfta-bouncer @ a178244dbf0b8a165aabc02a5d1ba05006f9ec22 | licenses: ["Apache-2.0"]
stars: 1 (2019-01-10T00:31:09.000Z) | issues: 21 via nmfta-repo/nmfta-bouncer (2019-02-28T14:23:11.000Z to 2020-07-07T20:46:37.000Z) | forks: 2 (2019-05-07T13:16:49.000Z to 2020-06-23T13:49:01.000Z)
#!/usr/bin/env python
#shamelessly stolen from: https://gitlab.com/dhj/easyufw
# A thin wrapper over the thin wrapper that is ufw
# Usage:
# import easyufw as ufw
# ufw.disable() # disable firewall
# ufw.enable() # enable firewall
# ufw.allow() # default allow -- allow all
# ufw.allow(22) # allow port 22, any protocol
# ufw.allow(22,'tcp') # allow port 22, tcp protocol
# ufw.allow('22/tcp') # allow port 22, tcp protocol
# ufw.allow(53,'udp') # allow port 53, udp protocol
# ufw.allow(53,'udp') # allow port 53, udp protocol
# ufw.deny() # default deny -- deny all
# ufw.deny(22,'tcp') # deny port 22, tcp protocol
# ufw.delete(22) # delete rules referencing port 22
# ufw.reset() # restore defaults
# ufw.status() # return status string (default verbose=True)
# ufw.run("allow 22") # directly run command as if from command line
import ufw.frontend
import ufw.common
import gettext
progName = ufw.common.programName
gettext.install(progName)#, unicode=True) # for i18n; fixes '_' not defined
ui = ufw.frontend.UFWFrontend(False) # no dryrun -- do it live
backend = ui.backend
parse_command = ufw.frontend.parse_command
def _parse(actionstr):
# parse commands like "allow 22", "reset", "default allow"
argv = [progName]
argv.extend(actionstr.split(' ')) # generate bogus argv to parse
pr = parse_command(argv)
return pr
def run(actionstr, force=False):
# run command with an explicit force argument
pr = _parse(actionstr)
rule = pr.data.get('rule','') # commands like reset don't have a rule
iptype = pr.data.get('iptype','')
return ui.do_action(pr.action,rule,iptype,force)
def reset(force=True):
run('reset',force=force)
def enable():
ui.set_enabled(True)
def disable():
ui.set_enabled(False)
def allow(port=None, protocol=None):
# port int; protocol str ['tcp','udp']
pp = None
if port is not None:
pp = "" # port and protocol string
pp += str(port)
if protocol is not None:
pp += '/' + protocol
_allow(pp)
def _allow(pp=None):
# pp = port and protocol string ['22','22/tcp','53/udp']
# port without protocol includes all protocols
if pp is None:
run('default allow')
else:
run('allow ' + pp)
def deny(port=None, protocol=None):
# port int; protocol str ['tcp','udp']
pp = None
if port is not None:
pp = "" # port and protocol string
pp += str(port)
if protocol is not None:
pp += '/' + protocol
_deny(pp)
def _deny(pp=None):
# pp = port and protocol string
if pp is None:
run('default deny')
else:
run('deny ' + pp)
def delete(port):
# delete all rules by destination port
while _delete(port): pass # while ports deleted re-enumerate and continue
def _delete(port):
for i,rule in enumerate(backend.get_rules()):
rule_port = None
try:
rule_port = int(rule.dport)
        except (TypeError, ValueError):  # dport may be None or a named service
            rule_port = None
if rule_port is not None and port == rule_port:
run("delete " + str(i+1), force=True)
return True # delete one rule; enumeration changes after delete
return False
def status(verbose=True):
cmd = 'status'
if verbose:
cmd += ' verbose'
return run(cmd)
[condensed per-file quality signals: avg line length 29.27 | max line length 77 | alphanumeric fraction 0.621 | 471 words | 3,395 chars | 115 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a33b81f30e4d4f06b72174eedd941785bd5b519 | size: 1,325 | ext: py | lang: Python
path: test/libsalt/test_vehicle.py | repo: etri-city-traffic-brain/traffic-simulator @ 6d5061febeaef484388b2b5aee14d9894099d98a | licenses: ["Apache-2.0"]
stars: 8 (2020-08-27T05:44:05.000Z to 2021-12-27T05:11:17.000Z) | issues: null | forks: 1 (2020-11-27T05:17:29.000Z)
import libsalt
def test(salt_scenario):
libsalt.start(salt_scenario)
libsalt.setCurrentStep(25200)
step = libsalt.getCurrentStep()
while step <= 36000:
if (step % 100 == 0):
print("Simulation Step: ", step)
test_funcs()
libsalt.simulationStep()
step = libsalt.getCurrentStep()
libsalt.close()
print("Python: Simulation End!!!")
def test_funcs():
standbys = libsalt.vehicle.getStandbyVehicles()
runnings = libsalt.vehicle.getRunningVehicles()
print("#Running Vehicles: ", len(runnings))
#for vehicle in runnings:
# print("\t", vehicle.toString())
#for vehicle in standbys:
# print("\t", vehicle.toString())
# for vehicle in runnings:
# print("Running Vehicle)", vehicle.id, ":", libsalt.vehicle.getRoute(vehicle.id).toString())
# print("Running Vehicle)", vehicle.id, ":", vehicle.toString())
#print("#Standby Vehicles: ", len(standbys))
#for vehicle in standbys:
# print("Standby Vehicle)", vehicle.id, ":", libsalt.vehicle.getRouteString(vehicle.id))
#print("Standby Vehicle)", vehicle.id, ":", vehicle.toString())
if __name__ == "__main__":
salt_scenario = r"/home/mclee/project/traffic-simulator/data/dj_sample_data/2020-dj_sample.json"
test(salt_scenario)
[condensed per-file quality signals: avg line length 33.13 | max line length 101 | alphanumeric fraction 0.648 | 142 words | 1,325 chars | 39 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a362d5ac32fdf3188152eb3fc2c0b00c7db0458 | size: 3,080 | ext: py | lang: Python
path: vunit/test/unit/test_tokenizer.py | repo: bjacobs1/vunit @ a7f7717a172855ea7852296bb768370d50cfc992 | licenses: ["Artistic-2.0"]
stars: 1 (2020-08-30T08:30:02.000Z) | issues/forks: null (mirror record: smgl9/vunit @ 9933d9a1ae600cc241894244361282dd7f7227d7)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2018, Lars Asplund lars.anders.asplund@gmail.com
"""
Test of the general tokenizer
"""
from unittest import TestCase
from vunit.parsing.tokenizer import describe_location
from vunit.test.mock_2or3 import mock
class TestTokenizer(TestCase):
"""
Test of the general tokenizer
"""
def test_describes_single_char_location(self):
self.assertEqual(
_describe_location("""\
S
"""), """\
at filename0 line 1:
S
~""")
    def test_describes_single_char_location_within(self):
        self.assertEqual(
            _describe_location("""\
 S
"""), """\
at filename0 line 1:
 S
 ~""")
def test_describes_multi_char_location(self):
self.assertEqual(
_describe_location("""\
S E
"""), """\
at filename0 line 1:
S E
~~~""")
    def test_describes_multi_char_location_within(self):
        self.assertEqual(
            _describe_location("""\
 S E
"""), """\
at filename0 line 1:
 S E
 ~~~""")
def test_describes_multi_line_location(self):
self.assertEqual(
_describe_location("""\
S____
E
"""), """\
at filename0 line 1:
S____
~~~~~""")
def test_describes_multi_file_location(self):
self.assertEqual(
_describe_location("""\
S__E""", """\
SE"""), """\
from filename0 line 2:
S__E
~~~~
at filename1 line 3:
SE
~~""")
def test_describe_location_none(self):
self.assertEqual(describe_location(None),
"Unknown location")
def test_describe_missing_location(self):
self.assertEqual(describe_location((("missing.svh", (0, 0)), None)),
"Unknown location in missing.svh")
def test_describe_none_filename_location(self):
self.assertEqual(describe_location(((None, (0, 0)), None)),
"Unknown Python string")
def _describe_location(*codes):
"""
Helper to test describe_location
"""
contents = {}
location = None
for idx, code in enumerate(codes):
filename = "filename%i" % idx
contents[filename] = code
start = code.index("S")
if "E" in code:
end = code.index("E")
else:
end = start
location = ((filename, (start, end)), location)
with mock.patch("vunit.parsing.tokenizer.read_file", autospec=True) as mock_read_file:
with mock.patch("vunit.parsing.tokenizer.file_exists", autospec=True) as mock_file_exists:
def file_exists_side_effect(filename):
return filename in contents
def read_file_side_effect(filename):
return contents[filename]
mock_file_exists.side_effect = file_exists_side_effect
mock_read_file.side_effect = read_file_side_effect
retval = describe_location(location=location)
return retval
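# Illustrative call (derived from the tests above): _describe_location("S__E")
# builds a location spanning the S..E markers inside a fake file "filename0",
# and describe_location() renders it as:
#   at filename0 line 1:
#   S__E
#   ~~~~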
[condensed per-file quality signals: avg line length 24.64 | max line length 98 | alphanumeric fraction 0.623 | 367 words | 3,080 chars | 124 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a3701a8c1a4900d3599d12821235a51d12e4737 | size: 4,926 | ext: py | lang: Python
path: memeapp/views.py | repo: barbaramootian/Memes-app @ 4ffa2da997758ee4f35dc21e755e3db242b8654f | licenses: ["MIT", "Unlicense"]
stars/issues/forks: null
from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from django.contrib import messages
from .forms import PictureUploadForm,CommentForm
from .models import Image,Profile,Likes,Comments
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserCreationForm
from datetime import datetime
def index(request):
images=Image.objects.all()
context={'images':images}
return render(request,'memeapp/index.html',context)
def registerPage(request):
form=UserCreationForm()
if request.method == "POST":
form_results=UserCreationForm(request.POST)
if form_results.is_valid():
user =form_results.save(commit=False)
user.username=user.username.lower()
user.save()
login(request,user)
return redirect('index')
else:
            messages.error(request, 'Error occurred during registration')
context = {'reg_form':form}
return render(request, 'memeapp/auth.html',context)
def loginPage(request):
page='login'
if request.user.is_authenticated:
return redirect('index')
if request.method == "POST":
username=request.POST.get('username').lower()
password=request.POST.get('password')
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            messages.error(request, 'User does not exist')
user=authenticate(request,username=username,password=password)
if user is not None:
login(request,user)
return redirect('index')
else:
messages.error(request, 'Username OR Password does not exist')
context={'page':page}
return render(request, 'memeapp/auth.html', context)
def logoutUser(request):
logout(request)
return redirect('index')
@login_required(login_url='login')
def uploadPicture(request):
form = PictureUploadForm()
if request.method == "POST":
form_results = PictureUploadForm(request.POST,request.FILES)
if form_results.is_valid():
form_results.save()
return redirect('index')
context = {"form": form}
return render(request, 'memeapp/upload_picture.html', context)
@login_required(login_url='login')
def my_images(request):
current_user = request.user
images = Profile.objects.filter(user_id=current_user.id).first()
profiles = Image.objects.filter(user_id=current_user.id)
return render(request, 'memeapp/profile.html', {"profile": images,"images":profiles})
@login_required(login_url='login')
def each_image(request, id):
image = Image.objects.get(id=id)
return render(request, 'memeapp/image_details.html', {'image': image})
@login_required(login_url='login')
def like_picture(request, id):
likes = Likes.objects.filter(image_id=id).first()
if Likes.objects.filter(image_id=id, user_id=request.user.id).exists():
likes.delete()
image = Image.objects.get(id=id)
if image.likes_number == 0:
image.likes_number = 0
image.save()
else:
image.likes_number -= 1
image.save()
return redirect('/')
else:
likes = Likes(image_id=id, user_id=request.user.id)
likes.save()
image = Image.objects.get(id=id)
image.likes_number = image.likes_number + 1
image.save()
return redirect('/')
@login_required(login_url='login')
def comment(request,pk):
profile = Image.objects.get(pk=pk)
form_results = CommentForm(request.POST,instance=profile)
if request.method == "POST":
if form_results.is_valid():
user = request.user
comment= form_results.cleaned_data['comment']
comment_content = Comments(user=user, image=profile, comment=comment, created_on=datetime.now())
comment_content.save()
profile.comments_number = profile.comments_number + 1
profile.save()
return redirect('index')
else:
print('form is invalid')
else:
form_results = CommentForm
context = {'form':form_results,'image':profile}
return render(request,'memeapp/comments.html',context)
def search(request):
title = "Search"
if 'search_query' in request.GET and request.GET["search_query"]:
search_term = request.GET.get("search_query").lower()
searched_results = Image.search_image(search_term)
message = f"{search_term}"
context = {'message': message, 'results': searched_results, 'title': title}
return render(request, 'memeapp/search.html', context)
else:
messages.error(request, "You haven't searched for any term")
message = "You haven't searched for any term"
return render(request, 'memeapp/search.html', {"message": message})
[condensed per-file quality signals: avg line length 35.70 | max line length 108 | alphanumeric fraction 0.664 | 586 words | 4,926 chars | 137 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a3712991b980a6711c1dba6adf131ce5c5af892 | size: 4,997 | ext: py | lang: Python
path: sparv/modules/hist/diapivot.py | repo: spraakbanken/sparv-pipeline @ 7293d42c577afdaf01ce8a936743f8b83d6eb962 | licenses: ["MIT"]
stars: 17 (2018-09-21T07:01:45.000Z to 2022-02-24T23:26:49.000Z) | issues: 146 (2018-11-13T19:13:25.000Z to 2022-03-31T09:57:56.000Z) | forks: 5 (2019-02-14T00:50:38.000Z to 2021-03-29T15:37:41.000Z)
"""Create diapivot annotation."""
import logging
import pickle
import xml.etree.ElementTree as etree
import sparv.util as util
from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder
log = logging.getLogger(__name__)
PART_DELIM1 = "^1"
# @annotator("Diapivot annotation", language=["swe-1800"])
def diapivot_annotate(out: Output = Output("<token>:hist.diapivot", description="SALDO IDs corresponding to lemgrams"),
lemgram: Annotation = Annotation("<token>:saldo.lemgram"),
model: Model = Model("hist/diapivot.pickle")):
"""Annotate each lemgram with its corresponding saldo_id according to model.
Args:
out (str, optional): Resulting annotation file.
Defaults to Output("<token>:hist.diapivot", description="SALDO IDs corresponding to lemgrams").
lemgram (str, optional): Existing lemgram annotation. Defaults to Annotation("<token>:saldo.lemgram").
model (str, optional): Crosslink model. Defaults to Model("hist/diapivot.pickle").
"""
lexicon = PivotLexicon(model)
lemgram_annotation = list(lemgram.read())
out_annotation = []
for lemgrams in lemgram_annotation:
saldo_ids = []
for lemgram in lemgrams.split(util.DELIM):
s_i = lexicon.get_exactMatch(lemgram)
if s_i:
saldo_ids += [s_i]
out_annotation.append(util.AFFIX + util.DELIM.join(set(saldo_ids)) + util.AFFIX if saldo_ids else util.AFFIX)
out.write(out_annotation)
# @modelbuilder("Diapivot model", language=["swe"])
def build_diapivot(out: ModelOutput = ModelOutput("hist/diapivot.pickle")):
"""Download diapivot XML dictionary and save as a pickle file."""
# Download diapivot.xml
xml_model = Model("hist/diapivot.xml")
xml_model.download("https://svn.spraakdata.gu.se/sb-arkiv/pub/lmf/diapivot/diapivot.xml")
# Create pickle file
xml_lexicon = read_xml(xml_model.path)
log.info("Saving cross lexicon in Pickle format")
picklex = {}
for lem in xml_lexicon:
lemgrams = []
for saldo, match in list(xml_lexicon[lem].items()):
lemgrams.append(PART_DELIM1.join([saldo, match]))
picklex[lem] = sorted(lemgrams)
out.write_pickle(picklex)
# Clean up
xml_model.remove()
################################################################################
# Auxiliaries
################################################################################
class PivotLexicon:
"""A lexicon for old swedish SALDO lookups.
It is initialized from a pickled file.
"""
def __init__(self, crossfile, verbose=True):
"""Read pickled lexicon."""
if verbose:
log.info("Reading cross lexicon: %s", crossfile)
with open(crossfile, "rb") as F:
self.lexicon = pickle.load(F)
if verbose:
log.info("OK, read %d words", len(self.lexicon))
def lookup(self, lem):
"""Lookup a word in the lexicon."""
if lem.lower() == lem:
annotation_tag_pairs = self.lexicon.get(lem, [])
else:
annotation_tag_pairs = self.lexicon.get(lem, []) + self.lexicon.get(lem.lower(), [])
return list(map(_split_val, annotation_tag_pairs))
def get_exactMatch(self, word):
"""Get only exact matches from lexicon."""
s = self.lookup(word)
if s and s[0] == "exactMatch":
return s[1]
def _split_val(key_val):
return key_val.rsplit(PART_DELIM1)[1]
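# Example (illustrative): pickled entries have the form key + PART_DELIM1 + value,
# so _split_val('match^1exactMatch') returns 'exactMatch', and for a
# hypothetical entry _split_val('saldo^1gud..nn.1') returns 'gud..nn.1'.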
def read_xml(xml):
"""Read the XML version of crosslinked lexicon."""
log.info("Reading XML lexicon")
lexicon = {}
context = etree.iterparse(xml, events=("start", "end")) # "start" needed to save reference to root element
context = iter(context)
_event, root = next(context)
for event, elem in context:
if event == "end":
if elem.tag == 'LexicalEntry':
lemma = elem.find("Lemma")
dalin, saldo = [], ''
for form in lemma.findall("FormRepresentation"):
cat = _findval(form, "category")
lem = _findval(form, "lemgram")
if cat == "modern":
saldo = lem
else:
match = _findval(form, "match")
dalin += [(lem, match)]
[lexicon.update({d: {'saldo': saldo, 'match': m}}) for (d, m) in dalin]
# Done parsing section. Clear tree to save memory
if elem.tag in ['LexicalEntry', 'frame', 'resFrame']:
root.clear()
testwords = ["tigerhjerta..nn.1",
"lågland..nn.1",
"gud..nn.1"]
util.test_lexicon(lexicon, testwords)
log.info("OK, read")
return lexicon
def _findval(elems, key):
for form in elems:
att = form.get("att", "")
if att == key:
return form.get("val")
return ""
[condensed per-file quality signals: avg line length 33.31 | max line length 119 | alphanumeric fraction 0.583 | 571 words | 4,997 chars | 149 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a388679dce82d3f7e5c312799aab790d1280f39 | size: 440 | ext: py | lang: Python
path: src/reporter/tests/test_api.py | repo: msgis/ngsi-timeseries-api @ 5cc7a8beab748cecfd5fba61740f3730361d4e31 | licenses: ["MIT"]
stars/issues/forks: null
from conftest import QL_URL
import requests
def test_api():
api_url = "{}/".format(QL_URL)
r = requests.get('{}'.format(api_url))
assert r.status_code == 200, r.text
assert r.json() == {
"notify_url": "/v2/notify",
"subscriptions_url": "/v2/subscriptions",
"entities_url": "/v2/entities",
"types_url": "/v2/types",
"attributes_url": "/v2/attrs"
}
[condensed per-file quality signals: avg line length 29.33 | max line length 53 | alphanumeric fraction 0.543 | 51 words | 440 chars | 14 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a39601bd5d34aa0ef10ce85dcff9883e1a2620c | size: 6,349 | ext: py | lang: Python
path: gym_combat/gym_combat/envs/main.py | repo: refaev/combat_gym @ f02fcf98e95a1dda29cdddd4ae271de3e18ea3bf | licenses: ["MIT"]
stars/issues/forks: null
from matplotlib import style
from tqdm import tqdm
style.use("ggplot")
from gym_combat.envs.Arena.CState import State
from gym_combat.envs.Arena.Entity import Entity
from gym_combat.envs.Arena.Environment import Environment, Episode
from gym_combat.envs.Common.constants import *
from gym_combat.envs.Qtable import Qtable_DecisionMaker
from gym_combat.envs.DQN import DQNAgent_keras
from gym_combat.envs.Greedy import Greedy_player
import matplotlib.pyplot as plt
def print_start_of_game_info(blue_decision_maker, red_decision_maker):
print("Starting tournament!")
print("Blue player type: ", Agent_type_str[blue_decision_maker.type()])
if blue_decision_maker.path_model_to_load==None:
print("Blue player starting with no model")
else:
print("Blue player starting tournament with trained model: " , blue_decision_maker.path_model_to_load)
print("Red player type: ", Agent_type_str[red_decision_maker.type()])
if red_decision_maker.path_model_to_load==None:
print("Red player starting with no model")
else:
print("Red player starting tournament with trained model: " , red_decision_maker.path_model_to_load)
print("Number of rounds: ", NUM_OF_EPISODES)
print("~~~ GO! ~~~\n\n")
def evaluate(episode_number):
#if episode_number % EVALUATE_PLAYERS_EVERY == 0:
a = episode_number % EVALUATE_PLAYERS_EVERY
if a>=0 and a<EVALUATE_BATCH_SIZE:
EVALUATE = True
else:
EVALUATE = False
return EVALUATE
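# Illustrative behaviour (the constants live in Common.constants; the values
# below are hypothetical): with EVALUATE_PLAYERS_EVERY = 1000 and
# EVALUATE_BATCH_SIZE = 100, evaluate() returns True for episodes 1-99,
# 1000-1099, 2000-2099, ... i.e. a batch of evaluation episodes at the start
# of every interval, and False otherwise.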
def print_states(observation_for_blue_s0, observation_for_blue_s1):
import matplotlib.pyplot as plt
plt.matshow(observation_for_blue_s0.img)
plt.show()
plt.matshow(observation_for_blue_s1.img)
plt.show()
if __name__ == '__main__':
env = Environment(IS_TRAINING)
print("Starting Blue player")
blue_decision_maker = DQNAgent_keras.DQNAgent_keras()
#blue_decision_maker = DQNAgent_keras.DQNAgent_keras(UPDATE_CONTEXT=True, path_model_to_load='conv1(6_6_1_256)_conv2(4_4_256_128)_conv3(3_3_128_128)_flatten_fc__blue_202001_ 0.95max_ -0.04avg_ -3.10min__1620558885.model')
print("Starting red player")
### Red Decision Maker
red_decision_maker = Greedy_player.Greedy_player()
env.blue_player = Entity(blue_decision_maker)
env.red_player = Entity(red_decision_maker)
print_start_of_game_info(blue_decision_maker, red_decision_maker)
NUM_OF_EPISODES = env.NUMBER_OF_EPISODES
for episode in tqdm(range(1, NUM_OF_EPISODES + 1), ascii=True, unit='episodes'):
EVALUATE = evaluate(episode)
current_episode = Episode(episode, EVALUATE, show_always=False if IS_TRAINING else True)
# set new start position for the players
env.reset_game(episode)
# get observation
observation_for_blue_s0: State = env.get_observation_for_blue()
action_blue = -1
# initialize the decision_makers for the players
blue_decision_maker.set_initial_state(observation_for_blue_s0, episode)
#red_decision_maker.set_initial_state(observation_for_red_s0, episode) # for non-greedy players
blue_won_the_game = False
red_won_the_game = False
for steps_current_game in range(1, MAX_STEPS_PER_EPISODE + 1):
##### Blue's turn! #####
observation_for_blue_s0: State = env.get_observation_for_blue()
current_episode.print_episode(env, steps_current_game)
action_blue: AgentAction = blue_decision_maker.get_action(observation_for_blue_s0, EVALUATE)
env.take_action(Color.Blue, action_blue) # take the action!
current_episode.print_episode(env, steps_current_game)
current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Blue) is not WinEnum.NoWin)
            if current_episode.is_terminal:  # Blue won the game!
                blue_won_the_game = True
else:
##### Red's turn! #####
observation_for_red_s0: State = env.get_observation_for_red()
action_red: AgentAction = red_decision_maker.get_action(observation_for_red_s0, EVALUATE)
env.take_action(Color.Red, action_red) # take the action!
current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Red) is not WinEnum.NoWin)
if current_episode.is_terminal: # Blue won the game!
red_won_the_game = True
current_episode.print_episode(env, steps_current_game)
reward_step_blue, reward_step_red = env.handle_reward(steps_current_game)
current_episode.episode_reward_red += reward_step_red
current_episode.episode_reward_blue += reward_step_blue
observation_for_blue_s1: State = env.get_observation_for_blue()
blue_decision_maker.update_context(observation_for_blue_s0, action_blue, reward_step_blue, observation_for_blue_s1,
current_episode.is_terminal, EVALUATE)
if steps_current_game == MAX_STEPS_PER_EPISODE:
# if we exited the loop because we reached MAX_STEPS_PER_EPISODE
current_episode.is_terminal = True
if blue_won_the_game or red_won_the_game:
break
# for statistics
env.update_win_counters(steps_current_game)
env.data_for_statistics(current_episode.episode_reward_blue, current_episode.episode_reward_red, steps_current_game, blue_decision_maker.get_epsolon())
env.evaluate_info(EVALUATE, episode, steps_current_game, blue_decision_maker.get_epsolon())
if current_episode.episode_number % SAVE_STATS_EVERY == 0:
if False:#blue_decision_maker.type()== AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic:
blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, "conv")#env.save_folder_path)
# print info of episode:
current_episode.print_info_of_episode(env, steps_current_game, blue_decision_maker.get_epsolon(), episode)
env.end_run()
if blue_decision_maker.type() == AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic:
blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, env.save_folder_path)
[condensed per-file quality signals: avg line length 42.33 | max line length 229 | alphanumeric fraction 0.723 | 860 words | 6,349 chars | 149 lines; the remaining signal columns (mostly zeros) are omitted]

hexsha: 6a3afdedc7e9000d89eef5155bbd1cbb9eab9c08 | size: 4,132 | ext: py | lang: Python
path: libqif/core/hyper.py | repo: ramongonze/libqif @ 57be74a2342a303da5415a3d787855b8115e58f8 | licenses: ["MIT"]
stars: 2 (2021-10-16T17:34:58.000Z to 2021-11-16T16:15:13.000Z) | issues: null | forks: null
"""Hyper-distributions."""
from libqif.core.secrets import Secrets
from libqif.core.channel import Channel
from numpy import array, arange
from numpy import delete as npdelete
class Hyper:
def __init__(self, channel):
"""Hyper-distribution. To create an instance of this class it is
class it is necessary to have an instance of :py:class:`.Channel`
class. Once created an instance of :py:class:`.Hyper`, the constructor
generates the joint, outer and inner distributions.
Attributes
----------
channel : core.Channel
Channel object.
joint : numpy.ndarray
Matrix of joint distribution.
outer : numpy.ndarray
Outer distribution.
inners : numpy.ndarray
Matrix of inner distributions.
num_posteriors : int
Number of posterior distributions that result from reducing the
hyper-distribution, i.e., removing columns that contain only
zeros and merging columns where one is a linear combination
of another.
Parameters
----------
channel : core.Channel
Channel object.
"""
self._check_types(channel)
self.channel = channel
self.joint = self._generate_joint_distribution()
self.outer, self.inners = self._generate_posteriors()
self._reduce_hyper()
self.num_posteriors = len(self.outer)
def update_prior(self, prior):
"""Update the prior distribution on set of secrets.
The number of secrets must match the current number of rows of the channel.
Parameters
----------
prior : list, numpy.ndarray
Prior distribution on the set of secrets. prior[i] is the
probability of the secret named labels[i] being the real secret.
"""
self.channel.update_prior(prior)
self.joint = self._generate_joint_distribution()
self.outer, self.inners = self._generate_posteriors()
self._reduce_hyper()
self.num_posteriors = len(self.outer)
def _check_types(self, channel):
if not isinstance(channel, Channel):
raise TypeError('The parameter \'channel\' must be a core.channel.Channel object')
def _generate_joint_distribution(self):
joint = []
channel_t = self.channel.matrix.T
for i in arange(self.channel.num_outputs):
joint.append(self.channel.secrets.prior * channel_t[i])
return array(joint).T
def _generate_posteriors(self):
joint_t = self.joint.T.copy()
outer = []
for i in arange(self.channel.num_outputs):
outer.append(joint_t[i].sum())
if outer[i] > 0:
joint_t[i] = joint_t[i]/outer[i]
return array(outer), joint_t.T
def _reduce_hyper(self):
"""Given the hyper-distribution generated by _generate_posteriors
remove columns with zeros and merge columns that are a linear
combination of others. Thus algorithm has time complexity of O(n*m^2)
where n is the number of secrets and m is the number of outputs in
the.
"""
epsilon = 10**(-6)
# Delete inners that have 0 probability of occurring
zero_prob = self.outer < epsilon
self.outer = npdelete(self.outer, zero_prob, 0)
self.inners = npdelete(self.inners, zero_prob, 1)
delete_inner = [False] * len(self.outer)
for i in arange(self.inners.shape[1]):
for j in arange(i+1, self.inners.shape[1]):
# Check if inner i is equal to inner j
if (abs(self.inners[:,i] - self.inners[:,j]) < epsilon).sum() == self.channel.secrets.num_secrets:
delete_inner[j] = True # Delete inner j
self.outer[i] += self.outer[j] # Merge inner j into inner i
self.outer = npdelete(self.outer, delete_inner, 0)
self.inners = npdelete(self.inners, delete_inner, 1)
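# A minimal usage sketch (not part of the original module): build a 2x2 channel
# mirroring the dummy Channel constructed in _check_types above. The uniform
# prior and the identity matrix are illustrative values, assuming Channel takes
# (secrets, output labels, matrix) exactly as that dummy call suggests.
if __name__ == '__main__':
    secrets = Secrets(['x1', 'x2'], [0.5, 0.5])
    channel = Channel(secrets, ['y1', 'y2'], array([[1, 0], [0, 1]]))
    hyper = Hyper(channel)
    print(hyper.outer)           # outer distribution over the channel outputs
    print(hyper.inners)          # one posterior distribution per column
    print(hyper.num_posteriors)  # 2 distinct posteriors survive reduction here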
| 37.225225
| 114
| 0.609874
| 521
| 4,132
| 4.735125
| 0.261036
| 0.043778
| 0.014593
| 0.029185
| 0.224564
| 0.162951
| 0.128902
| 0.128902
| 0.102148
| 0.102148
| 0
| 0.006849
| 0.29332
| 4,132
| 110
| 115
| 37.563636
| 0.838014
| 0.363746
| 0
| 0.208333
| 0
| 0
| 0.025497
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.083333
| 0
| 0.270833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a3f937a42b26dd8a8d5325705ad3a6b2426f5e8
| 2,421
|
py
|
Python
|
pong.py
|
Teenahshe/ponggame
|
5e4032753894ce1e1ebeb51841676aac24aa22df
|
[
"MIT"
] | null | null | null |
pong.py
|
Teenahshe/ponggame
|
5e4032753894ce1e1ebeb51841676aac24aa22df
|
[
"MIT"
] | null | null | null |
pong.py
|
Teenahshe/ponggame
|
5e4032753894ce1e1ebeb51841676aac24aa22df
|
[
"MIT"
] | null | null | null |
"""
# Step 1 - Create the App
# Step 2 - Create the Game
# Step 3 - Build the Game
# Step 4 - Run the App
"""
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint
class PongPaddle(Widget):
score = NumericProperty(0)
def bounce_ball(self, ball):
if self.collide_widget(ball):
ball.velocity_x *= -1
class PongBall(Widget):
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
# Latest Position of the Ball = Current Velocity + Current Position
def move(self):
self.pos = Vector(*self.velocity) + self.pos
# Update - moving the ball by calling the move function and other stuff
# on touch_down() = when our fingers/mouse touches the screen
# on touch_up() - when we lift our finger off the screen after touching it
# on_touch_move() - when we drag our finger on the screen
class PongGame(Widget):
ball = ObjectProperty(None)
player1 = ObjectProperty(None)
player2 = ObjectProperty(None)
def serve_ball(self):
self.ball.velocity = Vector(4, 0).rotate(randint(0, 360))
def update(self, dt):
self.ball.move()
# Bounce off top and bottom Y
if (self.ball.y < 0) or (self.ball.y > self.height - 50):
self.ball.velocity_y *= -1.1
# Bounce off left and increase the score
if self.ball.x < 0:
self.ball.velocity_x *= -1
self.player1.score += 1
# Bounce off right and increase the score
if self.ball.x > self.width - 50:
self.ball.velocity_x *= -1
self.player2.score += 1
self.player1.bounce_ball(self.ball)
self.player2.bounce_ball(self.ball)
def on_touch_move(self, touch):
if touch.x < self.width / 4:
self.player1.center_y = touch.y
if touch.x > self.width * 3 / 4:
self.player2.center_y = touch.y
class PongApp(App):
def build(self):
game = PongGame()
game.serve_ball()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
PongApp().run()
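# Note: Kivy resolves the ObjectProperty fields above (ball, player1, player2)
# from a kv rule file, which this snippet does not ship. A hypothetical minimal
# pong.kv reconstruction (sizes and ids are guesses, shown only as a sketch):
#
# <PongBall>:
#     size: 50, 50
# <PongPaddle>:
#     size: 25, 200
# <PongGame>:
#     ball: pong_ball
#     player1: left_paddle
#     player2: right_paddle
#     PongBall:
#         id: pong_ball
#         center: self.parent.center
#     PongPaddle:
#         id: left_paddle
#         x: root.x
#         center_y: root.center_y
#     PongPaddle:
#         id: right_paddle
#         x: root.width - self.width
#         center_y: root.center_y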
| 28.482353
| 83
| 0.620818
| 327
| 2,421
| 4.525994
| 0.299694
| 0.064865
| 0.043243
| 0.036486
| 0.074324
| 0.02973
| 0
| 0
| 0
| 0
| 0
| 0.024812
| 0.28418
| 2,421
| 84
| 84
| 28.821429
| 0.829198
| 0.219744
| 0
| 0.041667
| 0
| 0
| 0.006145
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.5
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a3fb6dff04d4cee8ea3de55fdb86c079b4a97dc
| 18,713
|
py
|
Python
|
bridge_RL_agent_v16.py
|
EricZLou/BridgeRLAgent
|
78329eec5fcf320d2850f44dc33b138919fba82d
|
[
"MIT"
] | null | null | null |
bridge_RL_agent_v16.py
|
EricZLou/BridgeRLAgent
|
78329eec5fcf320d2850f44dc33b138919fba82d
|
[
"MIT"
] | null | null | null |
bridge_RL_agent_v16.py
|
EricZLou/BridgeRLAgent
|
78329eec5fcf320d2850f44dc33b138919fba82d
|
[
"MIT"
] | null | null | null |
"""
CS 238 Final Project: Bridge RL Agent
Eric Lou & Kimberly Tran
"""
import copy
import datetime
import numpy as np
import random
from collections import namedtuple
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
REPRESENTATIONS OF BRIDGE
Representing a "Card" as an integer:
Cards 0 -> 12 are Club 2 -> Club 14
Cards 13 -> 25 are Diamond 2 -> Diamond 14
Cards 26 -> 38 are Heart 2 -> Heart 14
Cards 39 -> 51 are Spade 2 -> Spade 14
Jack is 11
Queen is 12
King is 13
Ace is 14
Representing a "Suit" as an integer:
n/a is -1 <-- used in a "State" where no cards have been played yet.
Clubs is 0
Diamonds is 1
Hearts is 2
Spades is 3
Representing a "State" as an opening suit and frozenset of up to 3 "Card"-s:
state = State(1, frozenset(23, 0))
We have a Diamond 12 and Club 2 with an opening suit of Diamonds.
The agent is 3rd to play a card and must play a Diamond if it has one.
Representing the MDP with a Map from a "State" to an array of length-52:
We call this Map "weights". And the array of length-52 represets the
proportion with which the agent should play each of the 52 cards given
that it is at that state.
In this example, with state = (1, set(23, 0)), weights[state] will
likely have very large values at indices 24 and 25 since a
Diamond 13 and Diamond 14 will beat the Diamond 12.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
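# Two tiny helpers implied by the encoding above (a readability sketch only;
# the agent itself works directly on the raw integers):
SUIT_NAMES = ('Clubs', 'Diamonds', 'Hearts', 'Spades')

def card_suit(card):
    """Suit index 0-3 of an integer card, matching the table above."""
    return card // 13

def card_value(card):
    """Face value 2-14 of an integer card (11=J, 12=Q, 13=K, 14=A)."""
    return card % 13 + 2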
State = namedtuple('State', ['opening_suit', 'cards_played', 'partners_card'])
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" DEFINE SOME CONSTANTS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
NUM_ACTIONS = 52 # Agent can choose any card to play (only some are valid).
NUM_GAMES_TRAIN = 10000
NUM_GAMES_TEST = 10000
STATS_PER = 1000
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" RL AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
class BridgeAgent:
def __init__(self):
# We initialize all weights to 1 such that every card has an equal chance of being chosen.
self.weights = {}
self.weights[State(-1, frozenset(), -1)] = np.full(NUM_ACTIONS, 1.0)
for opening_suit in range(4):
for card_1 in range(52):
for card_2 in range(card_1, 52):
for card_3 in range(card_2, 52):
for card_partner in [-1, card_1, card_2, card_3]:
state = State(
opening_suit,
frozenset([card_1, card_2, card_3]),
card_partner)
self.weights[state] = np.full(NUM_ACTIONS, 1.0)
# self.alpha = 0.997 # 1,000
# self.alpha = 0.9995 # 10,000
# self.alpha = 0.99995 # 100,000
self.alpha = 0.999995 # 1,000,000
# self.alpha = 0.9999995 # 5,000,000
self.game_num = 1
"""
EXAMPLE
state = State(1, set(23, 0)) # Diamond 12, Club 2 <-- first 2 cards in round
card_played = 24 # Diamond 13 <-- 3rd card in round
If 4th card is not 25, then the agent wins. We want to increase the proportion
with which we play 24.
ba.add_win(state, card_played)
"""
def add_win(self, state, card_played):
self.weights[state][card_played] *= (1 + 0.1 * self.alpha ** self.game_num)
"""
EXAMPLE
state = State(1, set(23, 0))
card_played = 24
If 4th card is 25 (Diamond 14), then the agent loses. We want to decrease the
proportion with which we play 24.
ba.add_loss(state, card_played)
"""
def add_loss(self, state, card_played):
self.weights[state][card_played] /= (1 + 0.1 * self.alpha ** self.game_num)
"""
EXAMPLE
state = State(1, set(23, 0))
cards_in_hand = set(0, 1, 4, 8, 11, 20, 24, 38)
The agent chooses to play whichever remaining card has the highest weight.
The agent must play a Diamond if it has Diamonds. In this example, agent
will most likely play 24, which beats 23 <-- hopefully 24 has the highest
weight.
card_played = ba.play_card(state, cards_in_hand)
"""
def play_card(self, state, cards_in_hand):
# Following the EXAMPLE:
# suit = 1
suit = state.opening_suit
# valid_cards = [20, 24]
valid_cards = np.array([i for i in range(suit * 13, (suit + 1) * 13) if i in cards_in_hand])
if len(valid_cards) == 0:
valid_cards = cards_in_hand
# Choose the valid card with highest weight.
# index_into_valid_counts = 1 since 20 has a smaller weight than 24.
# index_into_valid_cards = np.argmax(self.weights[state][valid_cards])
index_into_valid_cards = np.random.choice(np.flatnonzero(self.weights[state][valid_cards] == self.weights[state][valid_cards].max()))
# returns valid_cards[1] = 24
return valid_cards[index_into_valid_cards]
"""
This function writes the policy at the end of the training phase.
"""
def write_policy(self, cards_in_hand, policy, filename, states_accessed):
count = 0
with open(filename + "_Last_Game.txt", 'w') as g:
g.write("Cards in Hand: {}\n\n".format(cards_in_hand))
with open(filename + ".txt", 'w') as f:
for state in self.weights:
f.write("State: suit {} | cards played {} | partner's card {}\nBest Card To Play: {}\n\n".format(state.opening_suit,
state.cards_played, state.partners_card,
policy[count]))
if state in states_accessed:
g.write("State: suit {} | cards played {} | partner's card {}\nBest Card To Play: {}\n\n".format(state.opening_suit,
state.cards_played, state.partners_card,
policy[count]))
count += 1
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" UTILITY FUNCTIONS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function deals random cards.
"""
deck = list(range(52))
def shuffle_cards():
random.shuffle(deck)
return [deck[0:13], deck[13:26], deck[26:39], deck[39:52]]
"""
This function is used by non-agents who play randomly.
"""
def play_random_card(suit, cards_in_hand):
if suit == -1:
return random.choice(cards_in_hand)
valid_cards = [i for i in range(suit * 13, (suit + 1) * 13) if i in cards_in_hand]
if len(valid_cards) == 0:
return random.choice(cards_in_hand)
return random.choice(valid_cards)
"""
This function determines the winner of the round.
"""
def determine_round_winner(suit, cards_played):
max_idx = -1
max_val = -1
for idx, card in enumerate(cards_played):
if suit * 13 <= card < (suit + 1) * 13 and card > max_val:
max_val, max_idx = card, idx
return max_idx
"""
This function determines the declarer based on which partnership has the most points.
Return: (agent_is_declarer, declarer_idx)
"""
def agent_declarer(hands):
points = count_points(hands) # determines the number of points in each hand
# agent's partnership has more points and agent is declarer
if points[0] + points[2] > points[1] + points[3] and points[2] > points[0]:
return True, 2
# agent is not declarer and agent should start the play
return False, -1
"""
This function counts the points in each hand.
Note: Ace is 12, 25, 38, 51
"""
def count_points(hands):
points = []
for hand in hands:
p = 0
for card in hand:
if card % 13 == 12:
p += 4
elif card % 13 == 11:
p += 3
elif card % 13 == 10:
p += 2
elif card % 13 == 9:
p += 1
points.append(p)
return points
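# Worked example for the mapping above: the hand [12, 24, 9] holds the Club Ace
# (12 % 13 == 12 -> 4 points), the Diamond King (24 % 13 == 11 -> 3 points) and
# the Club Jack (9 % 13 == 9 -> 1 point), so count_points([[12, 24, 9]]) == [8].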
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" TRACKS PERFORMANCE OF BRIDGE AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
class BridgeAgentRedFlags:
def __init__(self):
self.RED_FLAG_VIOLATIONS = np.zeros(3)
self.RED_FLAG_TOTAL_COUNT = np.zeros(3)
self.ALL_RED_FLAG_VIOLATIONS = np.zeros(3) # Cumulative
self.ALL_RED_FLAG_TOTAL_COUNT = np.zeros(3) # Cumulative
def clear_red_flags(self):
self.RED_FLAG_VIOLATIONS = np.zeros(3)
self.RED_FLAG_TOTAL_COUNT = np.zeros(3)
"""
This function checks if the agent plays their highest card even though the
highest card already played is higher than theirs.
"""
def highest_card(self, valid_cards, agent_valid_cards, card):
if len(agent_valid_cards) > 1 and max(valid_cards) > max(agent_valid_cards):
self.RED_FLAG_TOTAL_COUNT[0] += 1
self.ALL_RED_FLAG_TOTAL_COUNT[0] += 1
if card == max(agent_valid_cards):
self.RED_FLAG_VIOLATIONS[0] += 1
self.ALL_RED_FLAG_VIOLATIONS[0] += 1
"""
This function checks if the agent wins a round when there's three cards played already
and the agent has at least one higher card than what's been played.
"""
def higher_card(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards):
if (len(cards_played) == 3 and len(agent_valid_cards) > 1 and
max(agent_valid_cards) > max(valid_cards) and
max(valid_cards) not in partners_cards
):
self.RED_FLAG_TOTAL_COUNT[1] += 1
self.ALL_RED_FLAG_TOTAL_COUNT[1] += 1
if card < max(valid_cards):
self.RED_FLAG_VIOLATIONS[1] += 1
self.ALL_RED_FLAG_VIOLATIONS[1] += 1
"""
This function checks if the agent plays a higher card even though their partner is guaranteed to win.
"""
def partner_win(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards):
if (len(cards_played) == 3 and len(agent_valid_cards) > 1 and
max(valid_cards) in partners_cards
):
self.RED_FLAG_TOTAL_COUNT[2] += 1
self.ALL_RED_FLAG_TOTAL_COUNT[2] += 1
if card > max(valid_cards):
self.RED_FLAG_VIOLATIONS[2] += 1
self.ALL_RED_FLAG_VIOLATIONS[2] += 1
"""
This function checks for any red flags based on what the agent played.
"""
def assess_card_played(self, hands, card, suit, cards_played, player_idx, partners_cards):
all_valid_cards = list(range(suit * 13, (suit + 1) * 13))
valid_cards = np.array([i for i in all_valid_cards if i in cards_played])
agent_valid_cards = np.array([i for i in all_valid_cards if i in hands[player_idx]])
if suit == -1:
return
# highest card played so far is higher than agent's highest card
self.highest_card(valid_cards, agent_valid_cards, card)
# 3 cards played and agent has higher cards, does it play highest card or highest necessary card?
self.higher_card(valid_cards, agent_valid_cards, card, cards_played, partners_cards)
# 3 cards played + partner has played highest card, does agent play lowest card? do they beat their partner?
self.partner_win(valid_cards, agent_valid_cards, card, cards_played, partners_cards)
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" PLAY A SINGLE GAME OF BRIDGE
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function plays 13 rounds of 1 NT bridge and outputs a winner.
"""
def play_game(game, hands, train=False, ba=None, barf=None):
partners_cards = copy.copy(hands[0])
agents_cards = copy.copy(hands[2])
declarer, d = agent_declarer(hands)
"""
hands[0] = North's cards
hands[1] = East's cards
hands[2] = Agent's cards
hands[3] = West's cards
"""
round_winner = (d + 1) % 4 # the person to the right of the declarer starts the game
NS_Wins = 0 # used to count total wins in agent partnership
states_accessed = [] # records which states have been updated for this game
# For each round
for _ in range(13):
cards_played = []
agent_card_played = [-1, -1]
agent_state = None
agent_state_2 = None
opening_suit = -1
# Each player plays a card in order starting from round_winner
for player in range(4):
card = None
player_idx = (round_winner + player) % 4
if player_idx == 2: # Agent plays
if ba:
agent_state = State(opening_suit, frozenset(cards_played), agent_card_played[1])
states_accessed.append(agent_state)
card = ba.play_card(agent_state, hands[player_idx])
else:
card = play_random_card(opening_suit, hands[player_idx])
agent_card_played[0] = card
barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards)
elif player_idx == 0: # if agent is declarer, they play their partner's cards
if ba and declarer:
agent_state_2 = State(opening_suit, frozenset(cards_played), agent_card_played[0])
states_accessed.append(agent_state_2)
card = ba.play_card(agent_state_2, hands[player_idx])
barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards)
else:
card = play_random_card(opening_suit, hands[player_idx])
agent_card_played[1] = card
else: # Random bot plays
card = play_random_card(opening_suit, hands[player_idx])
# Keep track of the opening suit.
if player == 0:
opening_suit = card // 13
hands[player_idx].remove(card)
cards_played.append(card)
# Get the winning card.
round_winner = (determine_round_winner(opening_suit, cards_played) + round_winner) % 4
# Adjust the BridgeAgent weights.
# If the BridgeAgent or N wins.
if round_winner == 0 or round_winner == 2:
if ba and train:
ba.add_win(agent_state, agent_card_played[0])
if declarer:
ba.add_win(agent_state_2, agent_card_played[1])
NS_Wins += 1
else:
if ba and train:
ba.add_loss(agent_state, agent_card_played[0])
if declarer:
ba.add_loss(agent_state_2, agent_card_played[1])
# for the last game, determine and write out policy
if ba and game == (NUM_GAMES_TRAIN - 1):
policy = []
for x in ba.weights:
y = copy.deepcopy(ba.weights[x])
best = np.argmax(y)  # renamed from `max` to avoid shadowing the built-in
while best in x.cards_played:
y[best] = -1
best = np.argmax(y)
policy.append(best)
game_file = "Bridge_" + str(game + 1)
ba.write_policy(agents_cards, policy, game_file, states_accessed)
return NS_Wins
def game_summary(ba, t, iterations=NUM_GAMES_TRAIN):
with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Train-" + str(t) + ".csv", 'w') as k:
k.write("game,"
"agent_wins,random_wins,diff_wins,"
"agent_rfv_a,agent_rftc_a,"
"agent_rfv_b,agent_rftc_b,"
"agent_rfv_c,agent_rftc_c,"
"random_rfv_a,random_rftc_a,"
"random_rfv_b,random_rftc_b,"
"random_rfv_c,random_rftc_c\n")
barf = BridgeAgentRedFlags()
barf_random = BridgeAgentRedFlags()
NS_Wins = [0]
NS_Wins_random = [0]
for game in range(iterations):
hands = shuffle_cards()
NS_Wins[-1] += play_game(game=game, hands=copy.deepcopy(hands), train=t, ba=ba, barf=barf) # train only when t is True; the test phase must leave the weights untouched
NS_Wins_random[-1] += play_game(game=game, hands=hands, ba=None, barf=barf_random)
ba.game_num += 1
if (game + 1) % STATS_PER == 0:
print(f"{game + 1} / ", end="", flush=True)
rfv = barf.RED_FLAG_VIOLATIONS
rfv_random = barf_random.RED_FLAG_VIOLATIONS
rftc = barf.RED_FLAG_TOTAL_COUNT
rftc_random = barf_random.RED_FLAG_TOTAL_COUNT
with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Train-" + str(t) + ".csv", 'a') as k:
k.write(
f"{game + 1},"
f"{NS_Wins[-1]},{NS_Wins_random[-1]},{NS_Wins[-1] - NS_Wins_random[-1]},"
f"{rfv[0]},{rftc[0]},"
f"{rfv[1]},{rftc[1]},"
f"{rfv[2]},{rftc[2]},"
f"{rfv_random[0]},{rftc_random[0]},"
f"{rfv_random[1]},{rftc_random[1]},"
f"{rfv_random[2]},{rftc_random[2]},"
f"\n")
# Cumulative statistics on red flags for every STATS_PER games.
barf.clear_red_flags()
barf_random.clear_red_flags()
NS_Wins.append(0)
NS_Wins_random.append(0)
average_win_delta = (sum(NS_Wins)-sum(NS_Wins_random)) / ((len(NS_Wins) - 1) * STATS_PER)
average_rf_ratios_agent = np.divide(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT)
average_rf_ratios_random = np.divide(barf_random.ALL_RED_FLAG_VIOLATIONS, barf_random.ALL_RED_FLAG_TOTAL_COUNT)
print(f"Average Win Delta (want this to be positive): {average_win_delta}")
print(f"Average Red Flag Ratios - Agent: {average_rf_ratios_agent}")
print(f"Average Red Flag Ratios - Random: {average_rf_ratios_random}")
with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Avg_Train-" + str(t) + ".csv", 'w') as m:
m.write(f"avg_win_delta,avg_rf_agent,avg_rf_random\n"
f"{average_win_delta},{average_rf_ratios_agent},{average_rf_ratios_random}\n")
return ba
def main():
start_time = datetime.datetime.now()
hands = []
# TRAINING
print(f"TRAINING on {NUM_GAMES_TRAIN} games")
ba = BridgeAgent()
ba = game_summary(ba, True)
# TESTING -- we don't change the weights here
print(f"TESTING on {NUM_GAMES_TEST} games")
game_summary(ba, False, iterations=NUM_GAMES_TEST)
end_time = datetime.datetime.now()
print("Runtime: ", end_time - start_time) # runtime
if __name__ == "__main__":
main()
| 37.880567
| 141
| 0.579276
| 2,554
| 18,713
| 4.036805
| 0.136257
| 0.042677
| 0.021436
| 0.021436
| 0.340834
| 0.29612
| 0.241707
| 0.193986
| 0.178274
| 0.153055
| 0
| 0.032978
| 0.288623
| 18,713
| 493
| 142
| 37.957404
| 0.741511
| 0.088708
| 0
| 0.149425
| 0
| 0.007663
| 0.088801
| 0.040498
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072797
| false
| 0
| 0.019157
| 0
| 0.145594
| 0.02682
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a40e4db387ff19b81d94d3c6d3164793744fc01
| 1,411
|
py
|
Python
|
tests/hacsbase/test_hacsbase_data.py
|
chbonkie/hacs
|
81db513a0d3d1af1acf25da7b706ae62d8fdb6fa
|
[
"MIT"
] | 2
|
2019-06-18T11:30:53.000Z
|
2019-10-03T21:34:11.000Z
|
tests/hacsbase/test_hacsbase_data.py
|
chbonkie/hacs
|
81db513a0d3d1af1acf25da7b706ae62d8fdb6fa
|
[
"MIT"
] | 341
|
2019-06-18T11:30:55.000Z
|
2021-07-15T05:38:46.000Z
|
tests/hacsbase/test_hacsbase_data.py
|
chbonkie/hacs
|
81db513a0d3d1af1acf25da7b706ae62d8fdb6fa
|
[
"MIT"
] | null | null | null |
"""Data Test Suite."""
import pytest
import os
from homeassistant.core import HomeAssistant
from custom_components.hacs.hacsbase.data import HacsData
from custom_components.hacs.helpers.classes.repository import HacsRepository
from custom_components.hacs.hacsbase.configuration import Configuration
from custom_components.hacs.share import get_hacs
from tests.dummy_repository import dummy_repository_base
@pytest.mark.asyncio
async def test_hacs_data_async_write1(tmpdir):
data = HacsData()
hacs = get_hacs()
repository = dummy_repository_base()
repository.data.installed = True
repository.data.installed_version = "1"
hacs.repositories = [repository]
hacs.hass = HomeAssistant()
hacs.hass.config.config_dir = tmpdir
hacs.configuration = Configuration()
await data.async_write()
@pytest.mark.asyncio
async def test_hacs_data_async_write2(tmpdir):
data = HacsData()
hacs = get_hacs()
hacs.hass = HomeAssistant()
hacs.hass.config.config_dir = tmpdir
hacs.configuration = Configuration()
hacs.system.status.background_task = False
hacs.system.disabled = False
await data.async_write()
@pytest.mark.asyncio
async def test_hacs_data_restore(tmpdir):
data = HacsData()
hacs = get_hacs()
hacs.hass = HomeAssistant()
hacs.hass.config.config_dir = tmpdir
await data.restore()
| 30.021277
| 76
| 0.763997
| 175
| 1,411
| 5.982857
| 0.285714
| 0.045845
| 0.076409
| 0.091691
| 0.496657
| 0.43553
| 0.407832
| 0.407832
| 0.407832
| 0.362942
| 0
| 0.002502
| 0.150248
| 1,411
| 46
| 77
| 30.673913
| 0.870726
| 0.011339
| 0
| 0.5
| 0
| 0
| 0.00072
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.236842
| 0
| 0.236842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a42b37643b67ad750eaa6bdb4b138eb04976787
| 2,736
|
py
|
Python
|
bpython/curtsiesfrontend/parse.py
|
dtrodrigues/bpython
|
143e4e55d8f5227149528a5880a32a516a40f14d
|
[
"PSF-2.0"
] | 2,168
|
2015-01-01T11:41:40.000Z
|
2022-03-29T07:44:48.000Z
|
bpython/curtsiesfrontend/parse.py
|
dtrodrigues/bpython
|
143e4e55d8f5227149528a5880a32a516a40f14d
|
[
"PSF-2.0"
] | 521
|
2015-01-02T16:43:44.000Z
|
2022-03-31T12:37:55.000Z
|
bpython/curtsiesfrontend/parse.py
|
dtrodrigues/bpython
|
143e4e55d8f5227149528a5880a32a516a40f14d
|
[
"PSF-2.0"
] | 250
|
2015-01-08T21:28:18.000Z
|
2022-02-28T16:07:43.000Z
|
import re
from curtsies.formatstring import fmtstr, FmtStr
from curtsies.termformatconstants import (
FG_COLORS,
BG_COLORS,
colors as CURTSIES_COLORS,
)
from functools import partial
from ..lazyre import LazyReCompile
COLORS = CURTSIES_COLORS + ("default",)
CNAMES = dict(zip("krgybmcwd", COLORS))
# hack for finding the "inverse"
INVERSE_COLORS = {
CURTSIES_COLORS[idx]: CURTSIES_COLORS[
(idx + (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS)
]
for idx in range(len(CURTSIES_COLORS))
}
INVERSE_COLORS["default"] = INVERSE_COLORS[CURTSIES_COLORS[0]]
def func_for_letter(letter_color_code: str, default: str = "k"):
"""Returns FmtStr constructor for a bpython-style color code"""
if letter_color_code == "d":
letter_color_code = default
elif letter_color_code == "D":
letter_color_code = default.upper()
return partial(
fmtstr,
fg=CNAMES[letter_color_code.lower()],
bold=letter_color_code.isupper(),
)
def color_for_letter(letter_color_code: str, default: str = "k"):
if letter_color_code == "d":
letter_color_code = default
return CNAMES[letter_color_code.lower()]
def parse(s):
"""Returns a FmtStr object from a bpython-formatted colored string"""
rest = s
stuff = []
while True:
if not rest:
break
start, rest = peel_off_string(rest)
stuff.append(start)
return (
sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0]))
if len(stuff) > 0
else FmtStr()
)
def fs_from_match(d):
atts = {}
color = "default"  # fallback so a bg of "I" (inverse) with no fg code cannot hit an unbound name below
if d["fg"]:
# this isn't according to spec as I understand it
if d["fg"].isupper():
d["bold"] = True
# TODO figure out why boldness isn't based on presence of \x02
color = CNAMES[d["fg"].lower()]
if color != "default":
atts["fg"] = FG_COLORS[color]
if d["bg"]:
if d["bg"] == "I":
# hack for finding the "inverse"
color = INVERSE_COLORS[color]
else:
color = CNAMES[d["bg"].lower()]
if color != "default":
atts["bg"] = BG_COLORS[color]
if d["bold"]:
atts["bold"] = True
return fmtstr(d["string"], **atts)
peel_off_string_re = LazyReCompile(
r"""(?P<colormarker>\x01
(?P<fg>[krgybmcwdKRGYBMCWD]?)
(?P<bg>[krgybmcwdKRGYBMCWDI]?)?)
(?P<bold>\x02?)
\x03
(?P<string>[^\x04]*)
\x04
(?P<rest>.*)
""",
re.VERBOSE | re.DOTALL,
)
def peel_off_string(s):
m = peel_off_string_re.match(s)
assert m, repr(s)
d = m.groupdict()
rest = d["rest"]
del d["rest"]
return d, rest
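# Example of the byte format the regex above accepts (a standalone sketch):
# \x01 opens a color marker ('y' = yellow fg; an uppercase code implies bold),
# an optional \x02 also marks bold, and \x03...\x04 delimit the payload.
if __name__ == '__main__':
    fs = parse('\x01y\x03hello\x04\x01R\x02\x03 world\x04')
    print(fs)  # 'hello' in yellow, ' world' in bold red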
| 25.811321
| 75
| 0.592105
| 353
| 2,736
| 4.424929
| 0.303116
| 0.069142
| 0.105634
| 0.03073
| 0.21767
| 0.1242
| 0.1242
| 0.1242
| 0.099872
| 0
| 0
| 0.008538
| 0.272295
| 2,736
| 105
| 76
| 26.057143
| 0.775992
| 0.107091
| 0
| 0.081081
| 0
| 0
| 0.038514
| 0
| 0
| 0
| 0
| 0.009524
| 0.013514
| 1
| 0.067568
| false
| 0
| 0.067568
| 0
| 0.202703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a448dff56ffb800e61093b735c0b738b7008227
| 12,168
|
py
|
Python
|
VegaZero2VegaLite.py
|
Thanksyy/Vega-Zero
|
dd25cb145faec047b01ca54c69ba96c56adb99f4
|
[
"MIT"
] | 5
|
2021-09-16T11:55:12.000Z
|
2022-03-03T12:20:22.000Z
|
VegaZero2VegaLite.py
|
Thanksyy/Vega-Zero
|
dd25cb145faec047b01ca54c69ba96c56adb99f4
|
[
"MIT"
] | 1
|
2021-11-22T09:41:52.000Z
|
2021-11-24T02:25:49.000Z
|
VegaZero2VegaLite.py
|
Thanksyy/Vega-Zero
|
dd25cb145faec047b01ca54c69ba96c56adb99f4
|
[
"MIT"
] | 2
|
2021-09-17T09:44:18.000Z
|
2022-03-05T19:14:45.000Z
|
__author__ = "Yuyu Luo"
import json
import pandas
class VegaZero2VegaLite(object):
def __init__(self):
pass
def parse_vegaZero(self, vega_zero):
self.parsed_vegaZero = {
'mark': '',
'data': '',
'encoding': {
'x': '',
'y': {
'aggregate': '',
'y': ''
},
'color': {
'z': ''
}
},
'transform': {
'filter': '',
'group': '',
'bin': {
'axis': '',
'type': ''
},
'sort': {
'axis': '',
'type': ''
},
'topk': ''
}
}
vega_zero_keywords = vega_zero.split(' ')
self.parsed_vegaZero['mark'] = vega_zero_keywords[vega_zero_keywords.index('mark') + 1]
self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data') + 1]
self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x') + 1]
self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2]
self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1]
if 'color' in vega_zero_keywords:
self.parsed_vegaZero['encoding']['color']['z'] = vega_zero_keywords[vega_zero_keywords.index('color') + 1]
if 'topk' in vega_zero_keywords:
self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk') + 1]
if 'sort' in vega_zero_keywords:
self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 1]
self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 2]
if 'group' in vega_zero_keywords:
self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group') + 1]
if 'bin' in vega_zero_keywords:
self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 1]
self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 3]
if 'filter' in vega_zero_keywords:
filter_part_token = []
for each in vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]:
if each not in ['group', 'bin', 'sort', 'topk']:
filter_part_token.append(each)
else:
break
if 'between' in filter_part_token:
filter_part_token[filter_part_token.index('between') + 2] = 'and ' + filter_part_token[
filter_part_token.index('between') - 1] + ' <='
filter_part_token[filter_part_token.index('between')] = '>='
# replace 'and' -- 'or'
filter_part_token = ' '.join(filter_part_token).split()
filter_part_token = ['&' if x == 'and' else x for x in filter_part_token]
filter_part_token = ['|' if x == 'or' else x for x in filter_part_token]
if '&' in filter_part_token or '|' in filter_part_token:
final_filter_part = ''
each_conditions = []
for i in range(len(filter_part_token)):
each = filter_part_token[i]
if each != '&' and each != '|':
# '=' in SQL --to--> '==' in Vega-Lite
if each == '=':
each = '=='
each_conditions.append(each)
if each == '&' or each == '|' or i == len(filter_part_token) - 1:
# each = '&' or '|'
if 'like' == each_conditions[1]:
# only consider this case: '%a%'
if each_conditions[2][1] == '%' and each_conditions[2][len(each_conditions[2]) - 2] == '%':
final_filter_part += 'indexof(' + 'datum.' + each_conditions[0] + ',"' + \
each_conditions[2][2:len(each_conditions[2]) - 2] + '") != -1'
elif 'like' == each_conditions[2] and 'not' == each_conditions[1]:
if each_conditions[3][1] == '%' and each_conditions[3][len(each_conditions[3]) - 2] == '%':
final_filter_part += 'indexof(' + 'datum.' + each_conditions[0] + ',"' + \
each_conditions[3][2:len(each_conditions[3]) - 2] + '") == -1'
else:
final_filter_part += 'datum.' + ' '.join(each_conditions)
if i != len(filter_part_token) - 1:
final_filter_part += ' ' + each + ' '
each_conditions = []
self.parsed_vegaZero['transform']['filter'] = final_filter_part
else:
# only single filter condition
self.parsed_vegaZero['transform']['filter'] = 'datum.' + ' '.join(filter_part_token).strip()
return self.parsed_vegaZero
def to_VegaLite(self, vega_zero, dataframe=None):
self.VegaLiteSpec = {
'bar': {
"mark": "bar",
"encoding": {
"x": {"field": "x", "type": "nominal"},
"y": {"field": "y", "type": "quantitative"}
}
},
'arc': {
"mark": "arc",
"encoding": {
"color": {"field": "x", "type": "nominal"},
"theta": {"field": "y", "type": "quantitative"}
}
},
'line': {
"mark": "line",
"encoding": {
"x": {"field": "x", "type": "nominal"},
"y": {"field": "y", "type": "quantitative"}
}
},
'point': {
"mark": "point",
"encoding": {
"x": {"field": "x", "type": "quantitative"},
"y": {"field": "y", "type": "quantitative"}
}
}
}
VegaZero = self.parse_vegaZero(vega_zero)
# assign some vega-zero keywords to the VegaLiteSpec object
if isinstance(dataframe, pandas.core.frame.DataFrame):
self.VegaLiteSpec[VegaZero['mark']]['data'] = dict()
self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records'))
if VegaZero['mark'] != 'arc':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x']
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y']
if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate']
else:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x']
self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y']
if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][
'aggregate']
if VegaZero['encoding']['color']['z'] != '':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = {
'field': VegaZero['encoding']['color']['z'], 'type': 'nominal'
}
# it seems that the grouping is performed by Vega-Lite by default in our cases.
if VegaZero['transform']['group'] != '':
pass
if VegaZero['transform']['bin']['axis'] != '':
if VegaZero['transform']['bin']['axis'] == 'x':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal'
if VegaZero['transform']['bin']['type'] in ['date', 'year', 'week', 'month']:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type']
elif VegaZero['transform']['bin']['type'] == 'weekday':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week'
else:
print('Unknown binning step.')
if VegaZero['transform']['filter'] != '':
if 'transform' not in self.VegaLiteSpec[VegaZero['mark']]:
self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{
"filter": VegaZero['transform']['filter']
}]
elif 'filter' not in self.VegaLiteSpec[VegaZero['mark']]['transform']:
self.VegaLiteSpec[VegaZero['mark']]['transform'].append({
"filter": VegaZero['transform']['filter']
})
else:
self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += ' & ' + VegaZero['transform']['filter']
if VegaZero['transform']['topk'] != '':
if VegaZero['transform']['sort']['axis'] == 'x':
sort_field = VegaZero['encoding']['x']
elif VegaZero['transform']['sort']['axis'] == 'y':
sort_field = VegaZero['encoding']['y']['y']
else:
print('Unknown sorting field: ', VegaZero['transform']['sort']['axis'])
sort_field = VegaZero['transform']['sort']['axis']
if VegaZero['transform']['sort']['type'] == 'desc':
sort_order = 'descending'
else:
sort_order = 'ascending'
if 'transform' in self.VegaLiteSpec[VegaZero['mark']]:
current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter']
self.VegaLiteSpec[VegaZero['mark']]['transform'][0][
'filter'] = current_filter + ' & ' + "datum.rank <= " + str(VegaZero['transform']['topk'])
self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, {
"window": [{
"field": sort_field,
"op": "dense_rank",
"as": "rank"
}],
"sort": [{"field": sort_field, "order": sort_order}]
})
else:
self.VegaLiteSpec[VegaZero['mark']]['transform'] = [
{
"window": [{
"field": sort_field,
"op": "dense_rank",
"as": "rank"
}],
"sort": [{"field": sort_field, "order": sort_order}]
},
{
"filter": "datum.rank <= " + str(VegaZero['transform']['topk'])
}
]
if VegaZero['transform']['sort']['axis'] != '':
if VegaZero['transform']['sort']['axis'] == 'x':
if VegaZero['transform']['sort']['type'] == 'desc':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x'
else:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x'
else:
if VegaZero['transform']['sort']['type'] == 'desc':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = '-y'
else:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = 'y'
return self.VegaLiteSpec[VegaZero['mark']]
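# A minimal round trip (illustrative only: 'cars' and 'origin' are made-up
# dataset/field names, and the query string follows the keyword grammar that
# parse_vegaZero consumes above):
if __name__ == '__main__':
    vz = VegaZero2VegaLite()
    spec = vz.to_VegaLite('mark bar data cars encoding x origin y aggregate count origin')
    print(json.dumps(spec, indent=2))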
| 48.094862
| 123
| 0.469921
| 1,095
| 12,168
| 5.050228
| 0.122374
| 0.054973
| 0.098373
| 0.136709
| 0.641591
| 0.51953
| 0.459675
| 0.37396
| 0.204521
| 0.130922
| 0
| 0.005846
| 0.353386
| 12,168
| 252
| 124
| 48.285714
| 0.697001
| 0.022436
| 0
| 0.251163
| 0
| 0
| 0.17264
| 0
| 0.018605
| 0
| 0
| 0
| 0
| 1
| 0.013953
| false
| 0.009302
| 0.009302
| 0
| 0.037209
| 0.009302
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a452a7c2c457bc63abb482a8725d53337bd5e88
| 6,254
|
py
|
Python
|
utils/dancer.py
|
kmzbrnoI/ac-python
|
383802734e17d2a00c0b86083cf923517db02acd
|
[
"Apache-2.0"
] | null | null | null |
utils/dancer.py
|
kmzbrnoI/ac-python
|
383802734e17d2a00c0b86083cf923517db02acd
|
[
"Apache-2.0"
] | 2
|
2020-04-12T11:31:24.000Z
|
2020-04-14T17:17:00.000Z
|
utils/dancer.py
|
kmzbrnoI/ac-python
|
383802734e17d2a00c0b86083cf923517db02acd
|
[
"Apache-2.0"
] | null | null | null |
"""Library for executing user-defined dance."""
import logging
from typing import Any, Dict, Optional, Callable
import datetime
import ac
import ac.blocks
from ac import ACs, AC
JC = Dict[str, Any]
class DanceStartException(Exception):
pass
class Step:
"""Base class for all specific dance steps."""
def update(self, acn: AC) -> None:
pass
def on_start(self, acn: AC) -> None:
pass
def disp_str(self) -> str:
return ''
class JCNotFoundException(DanceStartException):
pass
class StepJC(Step):
"""
Process jc 'name'. If processed already, skip processing and continue.
"""
name_to_id: Dict[str, int] = {}
def __init__(self, name: str, type_: str = 'VC') -> None:
self.jc: Optional[JC] = None
self.type = type_
self.name = name
def update(self, acn: AC) -> None:
assert isinstance(acn, DanceAC)
if self.jc is None:
jcid = self.get_jc_id(self.name, acn)
self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc']
if self.jc['state']['active']:
self.jc = None
acn.step_done()
return
result = acn.pt_put(f'/jc/{self.jc["id"]}/state', {})
if result['success']:
self.jc = None
acn.step_done()
def on_start(self, acn: AC) -> None:
self.get_jc_id(self.name, acn)
def get_jc_id(self, name: str, acn: AC) -> int:
if not StepJC.name_to_id:
jcs = acn.pt_get('/jc')['jc']
StepJC.name_to_id = {
jc['name']: jc['id']
for jc in jcs if jc['type'] == self.type
}
if name not in StepJC.name_to_id.keys():
raise JCNotFoundException(f'Jízdní cesta {self.name} neexistuje!')
return StepJC.name_to_id[name]
def disp_str(self) -> str:
return f'Stavění JC {self.name}'
class StepDelay(Step):
"""Delay any time."""
def __init__(self, delay: datetime.timedelta) -> None:
self.delay = delay
self.finish: Optional[datetime.datetime] = None
def update(self, acn: AC) -> None:
assert isinstance(acn, DanceAC)
if self.finish is None:
self.finish = datetime.datetime.now() + self.delay
if datetime.datetime.now() > self.finish:
self.finish = None
acn.step_done()
def disp_str(self) -> str:
return f'Čekání {self.delay}'
class BlockNotFoundException(DanceStartException):
pass
class StepWaitForBlock(Step):
"""Wait for specific state of any block. See examples below."""
name_to_id: Dict[str, int] = {}
def __init__(self, name: str, checker: Callable[[ac.Block], bool]) -> None:
self.name = name
self.checker = checker
self.block: Optional[ac.Block] = None
def update(self, acn: AC) -> None:
assert isinstance(acn, DanceAC)
if self.block is None:
blockid = self.get_block_id(self.name, acn)
self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block']
if self.checker(self.block):
self.block = None
acn.step_done()
else:
ac.blocks.register([self.block['id']])
def on_start(self, acn: AC) -> None:
self.get_block_id(self.name, acn)
def on_block_change(self, acn: AC, block: ac.Block) -> None:
assert isinstance(acn, DanceAC)
if self.block is None or block['id'] != self.block['id']:
return
if self.checker(block):
ac.blocks.unregister([self.block['id']])
self.block = None
acn.step_done()
def get_block_id(self, name: str, acn: AC) -> int:
if not StepWaitForBlock.name_to_id:
blocks = acn.pt_get('/blocks')['blocks']
StepWaitForBlock.name_to_id = {
block['name']: block['id'] for block in blocks
}
if name not in StepWaitForBlock.name_to_id.keys():
raise BlockNotFoundException(f"Blok {self.name} neexistuje!")
return StepWaitForBlock.name_to_id[name]
def disp_str(self) -> str:
return f'Čekání na stav bloku {self.name}'
def track_is_occupied(block: ac.Block) -> bool:
return bool(block['blockState']['state'] == 'occupied')
class DanceAC(AC):
"""This AC executes predefined steps."""
def __init__(self, id_: str, password: str,
steps: Dict[int, Step]) -> None:
AC.__init__(self, id_, password)
self.steps = steps
self.stepi = 0
def on_start(self) -> None:
logging.info('Start')
for stepi, step in self.steps.items():
try:
step.on_start(self)
except DanceStartException as e:
self.disp_error(f'Krok {stepi}: '+str(e))
self.done()
return
self.stepi = 1
self.send_step()
self.on_update()
def on_stop(self) -> None:
self.statestr = ''
self.statestr_send()
def on_update(self) -> None:
AC.on_update(self)
if not self.running():
return
if self.stepi in self.steps:
self.steps[self.stepi].update(self)
else:
logging.info('Done')
self.done()
def step_done(self) -> None:
logging.info(f'Step {self.stepi} done, '
f'going to step {self.stepi+1}...')
self.stepi += 1
self.send_step()
self.on_update()
def send_step(self) -> None:
if self.stepi in self.steps.keys():
if self.running():
description = self.steps[self.stepi].disp_str()
self.statestr = f'Aktuální krok: {self.stepi}: {description}'
self.statestr_send()
def on_block_change(self, block: ac.Block) -> None:
if (self.running() and
isinstance(self.steps[self.stepi], StepWaitForBlock)):
self.steps[self.stepi].on_block_change(self, block) # type: ignore
@ac.blocks.on_block_change()
def _on_block_change(block: ac.Block) -> None:
for acn in ACs.values():
if isinstance(acn, DanceAC):
acn.on_block_change(block)
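# A sketch of assembling a dance (the step arguments and credentials below are
# placeholders; how the AC is registered with the `ac` library and run is
# outside this module, so the wiring is shown as comments only):
#
# steps: Dict[int, Step] = {
#     1: StepJC('JC-name'),
#     2: StepDelay(datetime.timedelta(seconds=10)),
#     3: StepWaitForBlock('block-name', track_is_occupied),
# }
# dance = DanceAC('ac-id', 'password', steps)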
| 28.820276
| 79
| 0.570675
| 799
| 6,254
| 4.337922
| 0.166458
| 0.032314
| 0.023081
| 0.026255
| 0.306982
| 0.255049
| 0.201096
| 0.171379
| 0.160993
| 0.128679
| 0
| 0.000914
| 0.300448
| 6,254
| 216
| 80
| 28.953704
| 0.791314
| 0.044132
| 0
| 0.301282
| 0
| 0
| 0.071248
| 0.012464
| 0
| 0
| 0
| 0
| 0.025641
| 1
| 0.166667
| false
| 0.044872
| 0.038462
| 0.032051
| 0.339744
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a46dce57aefdfdd686c732c07a762fc3d1085f3
| 780
|
py
|
Python
|
praw/models/reddit/mixins/reportable.py
|
zachwylde00/praw
|
ad1d73e6a4a33397bbd983bdfde1a4f99ce5607d
|
[
"BSD-2-Clause"
] | 38
|
2020-03-14T22:22:40.000Z
|
2022-02-24T18:05:45.000Z
|
praw/models/reddit/mixins/reportable.py
|
zachwylde00/praw
|
ad1d73e6a4a33397bbd983bdfde1a4f99ce5607d
|
[
"BSD-2-Clause"
] | 3
|
2021-03-30T13:15:12.000Z
|
2021-09-22T18:55:59.000Z
|
praw/models/reddit/mixins/reportable.py
|
zachwylde00/praw
|
ad1d73e6a4a33397bbd983bdfde1a4f99ce5607d
|
[
"BSD-2-Clause"
] | 9
|
2020-02-21T23:55:13.000Z
|
2021-03-22T07:48:23.000Z
|
"""Provide the ReportableMixin class."""
from ....const import API_PATH
class ReportableMixin:
"""Interface for RedditBase classes that can be reported."""
def report(self, reason):
"""Report this object to the moderators of its subreddit.
:param reason: The reason for reporting.
Raises :class:`.APIException` if ``reason`` is longer than 100
characters.
Example usage:
.. code-block:: python
submission = reddit.submission(id='5or86n')
submission.report('report reason')
comment = reddit.comment(id='dxolpyc')
comment.report('report reason')
"""
self._reddit.post(
API_PATH["report"], data={"id": self.fullname, "reason": reason}
)
| 26
| 76
| 0.60641
| 83
| 780
| 5.662651
| 0.626506
| 0.029787
| 0.076596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010601
| 0.274359
| 780
| 29
| 77
| 26.896552
| 0.819788
| 0.597436
| 0
| 0
| 0
| 0
| 0.064815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a49b924a41db77163a887ba4fb25f3e874556fc
| 3,158
|
py
|
Python
|
mellon/factories/filesystem/file.py
|
LaudateCorpus1/mellon
|
a7a9f6d8abf1dd03b63a94ddb4439c6cc6c2e272
|
[
"MIT"
] | 5
|
2016-12-20T19:39:01.000Z
|
2021-01-08T16:19:17.000Z
|
mellon/factories/filesystem/file.py
|
CrowdStrike/mellon
|
7216f255d397a41b1c2777a1b02f1c085d07ddfe
|
[
"MIT"
] | 1
|
2018-03-21T17:05:13.000Z
|
2018-03-21T17:05:13.000Z
|
mellon/factories/filesystem/file.py
|
LaudateCorpus1/mellon
|
a7a9f6d8abf1dd03b63a94ddb4439c6cc6c2e272
|
[
"MIT"
] | 2
|
2017-11-01T15:03:27.000Z
|
2018-11-13T03:04:44.000Z
|
import collections
import os.path
from zope import component
from zope import interface
from zope.component.factory import Factory
from sparc.configuration import container
import mellon
@interface.implementer(mellon.IByteMellonFile)
class MellonByteFileFromFilePathAndConfig(object):
def __init__(self, file_path, config):
self.file_path = file_path
self.config = config
def __str__(self):
return "byte file at location {}".format(self.file_path)
def __iter__(self):
with open(self.file_path, 'rb') as stream:
file_ = component.createObject(u'mellon.byte_file_from_stream', stream, self.config)
for snippet in file_:
yield snippet
mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig)
@interface.implementer(mellon.IUnicodeMellonFile)
class MellonUnicodeFileFromFilePathAndConfig(object):
def __init__(self, file_path, config):
self.file_path = file_path
self.config = config
def __str__(self):
return "Unicode file at location {}".format(self.file_path)
def __iter__(self):
with open(str(self.file_path), 'r') as stream: # 'rU' is deprecated; universal newlines is the default in Python 3
file_ = component.createObject(u'mellon.unicode_file_from_stream', stream, self.config)
for snippet in file_:
yield snippet
mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig)
@interface.implementer(mellon.IMellonFileProvider)
class MellonFileProviderForRecursiveDirectoryConfig(object):
def __init__(self, config):
"""Init
Args:
config: sparc.configuration.container.ISparcAppPyContainerConfiguration
provider with
mellon.factories.filesystem[configure.yaml:FileSystemDir]
and mellon[configure.yaml:MellonSnippet] entries.
"""
self.config = config
def __iter__(self):
base_path = container.IPyContainerConfigValue(self.config).\
get('FileSystemDir')['directory']
for d, dirs, files in os.walk(base_path):
for f in files:
path = os.path.join(d, f)
if not os.path.isfile(path):
continue
#get interface-assigned string (IPath)
path = component.createObject(u'mellon.filesystem_path', path)
if mellon.IBinaryChecker(path).check():
yield component.createObject(\
u'mellon.factories.filesystem.byte_file', path, self.config)
else:
yield component.createObject(\
u'mellon.factories.filesystem.unicode_file', path, self.config)
mellonFileProviderForRecursiveDirectoryConfigFactory = Factory(MellonFileProviderForRecursiveDirectoryConfig)
interface.alsoProvides(mellonFileProviderForRecursiveDirectoryConfigFactory, mellon.IMellonFileProviderFactory)
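# Note: the component.createObject(u'mellon...') lookups above resolve only if
# the factories are registered as named IFactory utilities. A hedged sketch of
# manual registration (the real application presumably wires this up through
# its sparc/ZCML configuration instead):
#
# from zope.component import provideUtility
# from zope.component.interfaces import IFactory
# provideUtility(mellonByteFileFromFilePathAndConfigFactory,
#                IFactory, u'mellon.factories.filesystem.byte_file')
# provideUtility(mellonUnicodeFileFromFilePathAndConfigFactory,
#                IFactory, u'mellon.factories.filesystem.unicode_file')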
| 39.974684
| 111
| 0.662445
| 290
| 3,158
| 7.003448
| 0.303448
| 0.047267
| 0.047267
| 0.068932
| 0.271787
| 0.271787
| 0.271787
| 0.181192
| 0.181192
| 0.181192
| 0
| 0.000428
| 0.260608
| 3,158
| 78
| 112
| 40.487179
| 0.869379
| 0.08993
| 0
| 0.315789
| 0
| 0
| 0.0836
| 0.056208
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140351
| false
| 0
| 0.122807
| 0.035088
| 0.350877
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a4a8d86cea615c452f20cba99db27d3430077bf
| 4,840
|
py
|
Python
|
set1/c06_attack_repeating_key_xor.py
|
kangtastic/cryptopals
|
7014a08b836b3f9ebfdc889123ccf67406738dac
|
[
"WTFPL"
] | 1
|
2021-07-05T09:13:48.000Z
|
2021-07-05T09:13:48.000Z
|
set1/c06_attack_repeating_key_xor.py
|
kangtastic/cryptopals
|
7014a08b836b3f9ebfdc889123ccf67406738dac
|
[
"WTFPL"
] | null | null | null |
set1/c06_attack_repeating_key_xor.py
|
kangtastic/cryptopals
|
7014a08b836b3f9ebfdc889123ccf67406738dac
|
[
"WTFPL"
] | 1
|
2020-04-18T19:53:02.000Z
|
2020-04-18T19:53:02.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Break repeating-key XOR
#
# It is officially on, now.
#
# This challenge isn't conceptually hard, but it involves actual
# error-prone coding. The other challenges in this set are there to bring
# you up to speed. This one is there to qualify you. If you can do this
# one, you're probably just fine up to Set 6.
#
# There's a file here:
#
# http://cryptopals.com/static/challenge-data/6.txt
#
# It's been base64'd after being encrypted with repeating-key XOR.
#
# Decrypt it.
#
# Here's how:
#
# 1. Let KEYSIZE be the guessed length of the key; try values from 2 to
# (say) 40.
# 2. Write a function to compute the edit distance/Hamming distance between
# two strings. The Hamming distance is just the number of differing
# bits. The distance between:
#
# this is a test
#
# and
#
# wokka wokka!!!
#
# is 37. *Make sure your code agrees before you proceed.*
# 3. For each KEYSIZE, take the first KEYSIZE worth of bytes, and the
# second KEYSIZE worth of bytes, and find the edit distance between them.
# Normalize this result by dividing by KEYSIZE.
# 4. The KEYSIZE with the smallest normalized edit distance is probably the
# key. You could proceed perhaps with the smallest 2-3 KEYSIZE values.
# Or take 4 KEYSIZE blocks instead of 2 and average the distances.
# 5. Now that you probably know the KEYSIZE: break the ciphertext into
# blocks of KEYSIZE length.
# 6. Now transpose the blocks: make a block that is the first byte of every
# block, and a block that is the second byte of every block, and so on.
# 7. Solve each block as if it was single-character XOR. You already have
# code to do this.
# 8. For each block, the single-byte XOR key that produces the best looking
# histogram is the repeating-key XOR key byte for that block. Put them
# together and you have the key.
#
# This code is going to turn out to be surprisingly useful later on. Breaking
# repeating-key XOR ("Vigenère") statistically is obviously an academic
# exercise, a "Crypto 101" thing. But more people "know how" to break it than
# can actually break it, and a similar technique breaks something much more
# important.
#
# No, that's not a mistake.
#
# We get more tech support questions for this challenge than any of the
# other ones. We promise, there aren't any blatant errors in this text.
# In particular: the "wokka wokka!!!" edit distance really is 37.
#
import inspect
import os
import sys
from itertools import zip_longest
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0)))))
from util.loader import loader
from util.text import englishness, repeating_key_xor, single_byte_xor
# Lookup table for the number of 1 bits in a nibble. (Nybble, quartet, etc.)
NIBBLE_BITS = [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4]
def likely_key_sizes(bs, lower=2, upper=40, n=3):
"""Finds a repeating-key-XOR'd ciphertext's most likely key sizes."""
sizes = {}
for size in range(lower, upper + 1):
normalized_distance = 0
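# Summing over non-overlapping pairs means every key size compares roughly the
# same total number of bits (~4 * len(bs)), so the accumulated distances stay
# comparable across sizes even without dividing by the size itself.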
for i in range(0, len(bs) - size * 2, size * 2):
bs1, bs2 = bs[i : i + size], bs[i + size : i + size * 2]
normalized_distance += hamming_distance(bs1, bs2) / 2
sizes.update({size: normalized_distance})
return sorted(sizes, key=lambda k: sizes[k])[:n]
def hamming_distance(bs1, bs2):
"""Finds the Hamming distance between two bytestrings."""
distance = 0
for b1, b2 in zip_longest(bs1, bs2, fillvalue=0):
b = b1 ^ b2
distance += NIBBLE_BITS[b >> 4] + NIBBLE_BITS[b & 0xF]
return distance
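# Sanity check straight from the challenge text above: the Hamming distance
# between these two strings really is 37.
assert hamming_distance(b"this is a test", b"wokka wokka!!!") == 37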
def main():
ctext = loader("6.txt", "base64", split=False)
ptext, key, high_score = b"", b"", 0
for size in likely_key_sizes(ctext):
blocks = [ctext[i : i + size] for i in range(0, len(ctext), size)]
transposed = zip_longest(*blocks, fillvalue=0)
likely_key = b"".join(
single_byte_xor(tblock, key=True) for tblock in transposed
)
candidate = repeating_key_xor(ctext, likely_key)
score = englishness(candidate)
if score > high_score:
ptext, key, high_score = candidate, likely_key, score
print(f"Key: '{key.decode()}'")
print()
print(ptext.decode())
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
# Output:
#
# Key: 'Terminator X: Bring the noise' (29 bytes)
#
# I'm back and I'm ringin' the bell
# A rockin' on the mike while the fly girls yell
# In ecstasy in the back of me
# Well that's my DJ Deshay cuttin' all them Z's
# Hittin' hard and the girlies goin' crazy
# Vanilla's on the mike, man I'm not lazy.
#
# <remainder of output omitted>
#
| 32.266667
| 94
| 0.667355
| 766
| 4,840
| 4.168407
| 0.385117
| 0.026308
| 0.032884
| 0.015659
| 0.054494
| 0.009396
| 0
| 0
| 0
| 0
| 0
| 0.021922
| 0.23657
| 4,840
| 149
| 95
| 32.483221
| 0.842219
| 0.604132
| 0
| 0
| 0
| 0
| 0.02187
| 0
| 0
| 0
| 0.00164
| 0
| 0
| 1
| 0.068182
| false
| 0.022727
| 0.136364
| 0
| 0.25
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a4a939ebfe3446641070ee1531f5dae14b39a3f
| 26,798
|
py
|
Python
|
c2nl/models/transformer.py
|
kopf-yhs/ncscos
|
8248aaad32d4d19c01d070bf0dfba7aab849ba1d
|
[
"MIT"
] | 22
|
2021-05-22T19:58:39.000Z
|
2022-03-20T03:43:51.000Z
|
c2nl/models/transformer.py
|
kopf-yhs/ncscos
|
8248aaad32d4d19c01d070bf0dfba7aab849ba1d
|
[
"MIT"
] | 1
|
2021-07-17T13:15:33.000Z
|
2022-02-24T13:59:14.000Z
|
c2nl/models/transformer.py
|
kopf-yhs/ncscos
|
8248aaad32d4d19c01d070bf0dfba7aab849ba1d
|
[
"MIT"
] | 2
|
2021-05-10T05:18:00.000Z
|
2022-02-24T19:01:50.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as f
from prettytable import PrettyTable
from c2nl.modules.char_embedding import CharEmbedding
from c2nl.modules.embeddings import Embeddings
from c2nl.modules.highway import Highway
from c2nl.encoders.transformer import TransformerEncoder
from c2nl.decoders.transformer import TransformerDecoder
from c2nl.inputters import constants
from c2nl.modules.global_attention import GlobalAttention
from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion
from c2nl.utils.misc import sequence_mask
class Embedder(nn.Module):
def __init__(self, args):
super(Embedder, self).__init__()
self.enc_input_size = 0
self.dec_input_size = 0
# at least one of word or char embedding options should be True
assert args.use_src_word or args.use_src_char
assert args.use_tgt_word or args.use_tgt_char
self.use_src_word = args.use_src_word
self.use_tgt_word = args.use_tgt_word
if self.use_src_word:
self.src_word_embeddings = Embeddings(args.emsize,
args.src_vocab_size,
constants.PAD)
self.enc_input_size += args.emsize
if self.use_tgt_word:
self.tgt_word_embeddings = Embeddings(args.emsize,
args.tgt_vocab_size,
constants.PAD)
self.dec_input_size += args.emsize
self.use_src_char = args.use_src_char
self.use_tgt_char = args.use_tgt_char
if self.use_src_char:
assert len(args.filter_size) == len(args.nfilters)
self.src_char_embeddings = CharEmbedding(args.n_characters,
args.char_emsize,
args.filter_size,
args.nfilters)
self.enc_input_size += sum(map(int, args.nfilters))
self.src_highway_net = Highway(self.enc_input_size, num_layers=2)
if self.use_tgt_char:
assert len(args.filter_size) == len(args.nfilters)
self.tgt_char_embeddings = CharEmbedding(args.n_characters,
args.char_emsize,
args.filter_size,
args.nfilters)
self.dec_input_size += sum(map(int, args.nfilters))
self.tgt_highway_net = Highway(self.dec_input_size, num_layers=2)
self.use_type = args.use_code_type
if self.use_type:
self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP),
self.enc_input_size)
self.src_pos_emb = args.src_pos_emb
self.tgt_pos_emb = args.tgt_pos_emb
self.no_relative_pos = all(v == 0 for v in args.max_relative_pos)
if self.src_pos_emb and self.no_relative_pos:
self.src_pos_embeddings = nn.Embedding(args.max_src_len,
self.enc_input_size)
if self.tgt_pos_emb:
self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len + 2,
self.dec_input_size)
self.dropout = nn.Dropout(args.dropout_emb)
def forward(self,
sequence,
sequence_char,
sequence_type=None,
mode='encoder',
step=None):
if mode == 'encoder':
word_rep = None
if self.use_src_word:
word_rep = self.src_word_embeddings(sequence.unsqueeze(2)) # B x P x d
if self.use_src_char:
char_rep = self.src_char_embeddings(sequence_char) # B x P x f
if word_rep is None:
word_rep = char_rep
else:
word_rep = torch.cat((word_rep, char_rep), 2) # B x P x d+f
word_rep = self.src_highway_net(word_rep) # B x P x d+f
if self.use_type:
type_rep = self.type_embeddings(sequence_type)
word_rep = word_rep + type_rep
if self.src_pos_emb and self.no_relative_pos:
pos_enc = torch.arange(start=0,
end=word_rep.size(1)).type(torch.LongTensor)
pos_enc = pos_enc.expand(*word_rep.size()[:-1])
if word_rep.is_cuda:
pos_enc = pos_enc.cuda()
pos_rep = self.src_pos_embeddings(pos_enc)
word_rep = word_rep + pos_rep
elif mode == 'decoder':
word_rep = None
if self.use_tgt_word:
word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x P x d
if self.use_tgt_char:
char_rep = self.tgt_char_embeddings(sequence_char) # B x P x f
if word_rep is None:
word_rep = char_rep
else:
word_rep = torch.cat((word_rep, char_rep), 2) # B x P x d+f
word_rep = self.tgt_highway_net(word_rep) # B x P x d+f
if self.tgt_pos_emb:
if step is None:
pos_enc = torch.arange(start=0,
end=word_rep.size(1)).type(torch.LongTensor)
else:
pos_enc = torch.LongTensor([step])  # used at inference time
pos_enc = pos_enc.expand(*word_rep.size()[:-1])
if word_rep.is_cuda:
pos_enc = pos_enc.cuda()
pos_rep = self.tgt_pos_embeddings(pos_enc)
word_rep = word_rep + pos_rep
else:
raise ValueError('Unknown embedder mode!')
word_rep = self.dropout(word_rep)
return word_rep
class Encoder(nn.Module):
def __init__(self,
args,
input_size):
super(Encoder, self).__init__()
self.transformer = TransformerEncoder(num_layers=args.nlayers,
d_model=input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
dropout=args.trans_drop,
max_relative_positions=args.max_relative_pos,
use_neg_dist=args.use_neg_dist)
self.use_all_enc_layers = args.use_all_enc_layers
if self.use_all_enc_layers:
self.layer_weights = nn.Linear(input_size, 1, bias=False)
def count_parameters(self):
return self.transformer.count_parameters()
def forward(self,
input,
input_len):
layer_outputs, _ = self.transformer(input, input_len) # B x seq_len x h
if self.use_all_enc_layers:
output = torch.stack(layer_outputs, dim=2) # B x seq_len x nlayers x h
layer_scores = self.layer_weights(output).squeeze(3)
layer_scores = f.softmax(layer_scores, dim=-1)
memory_bank = torch.matmul(output.transpose(2, 3),
layer_scores.unsqueeze(3)).squeeze(3)
else:
memory_bank = layer_outputs[-1]
return memory_bank, layer_outputs
class Decoder(nn.Module):
def __init__(self, args, input_size):
super(Decoder, self).__init__()
self.input_size = input_size
self.split_decoder = args.split_decoder and args.copy_attn
if self.split_decoder:
# Following (https://arxiv.org/pdf/1808.07913.pdf), we split the decoder
self.transformer_c = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
coverage_attn=args.coverage_attn,
dropout=args.trans_drop
)
self.transformer_d = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
dropout=args.trans_drop
)
# To accomplish eq. 19 - 21 from `https://arxiv.org/pdf/1808.07913.pdf`
self.fusion_sigmoid = nn.Sequential(
nn.Linear(self.input_size * 2, self.input_size),
nn.Sigmoid()
)
self.fusion_gate = nn.Sequential(
nn.Linear(self.input_size * 2, self.input_size),
nn.ReLU()
)
else:
self.transformer = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
coverage_attn=args.coverage_attn,
dropout=args.trans_drop
)
if args.reload_decoder_state:
state_dict = torch.load(
args.reload_decoder_state, map_location=lambda storage, loc: storage
)
# NOTE: Decoder has no `self.decoder`; we assume the intent was to load
# the (non-split) transformer weights here.
self.transformer.load_state_dict(state_dict)
def count_parameters(self):
if self.split_decoder:
return self.transformer_c.count_parameters() + self.transformer_d.count_parameters()
else:
return self.transformer.count_parameters()
def init_decoder(self,
src_lens,
max_src_len):
if self.split_decoder:
state_c = self.transformer_c.init_state(src_lens, max_src_len)
state_d = self.transformer_d.init_state(src_lens, max_src_len)
return state_c, state_d
else:
return self.transformer.init_state(src_lens, max_src_len)
def decode(self,
tgt_words,
tgt_emb,
memory_bank,
state,
step=None,
layer_wise_coverage=None):
if self.split_decoder:
copier_out, attns = self.transformer_c(tgt_words,
tgt_emb,
memory_bank,
state[0],
step=step,
layer_wise_coverage=layer_wise_coverage)
dec_out, _ = self.transformer_d(tgt_words,
tgt_emb,
memory_bank,
state[1],
step=step)
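# Fusion of the two decoder streams (our reading of eq. 19-21 in the paper
# cited above): a sigmoid gate f_t scales the plain decoder output, and the
# copier output is fused with the gated stream via a ReLU-activated linear.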
f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1))
gate_input = torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1)
decoder_outputs = self.fusion_gate(gate_input)
else:
decoder_outputs, attns = self.transformer(tgt_words,
tgt_emb,
memory_bank,
state,
step=step,
layer_wise_coverage=layer_wise_coverage)
return decoder_outputs, attns
def forward(self,
memory_bank,
memory_len,
tgt_pad_mask,
tgt_emb):
max_mem_len = memory_bank[0].shape[1] \
if isinstance(memory_bank, list) else memory_bank.shape[1]
state = self.init_decoder(memory_len, max_mem_len)
return self.decode(tgt_pad_mask, tgt_emb, memory_bank, state)
class Transformer(nn.Module):
"""Module that generates a natural-language summary for a given source-code sequence."""
def __init__(self, args, tgt_dict):
"""Constructor of the class."""
super(Transformer, self).__init__()
self.name = 'Transformer'
if len(args.max_relative_pos) != args.nlayers:
assert len(args.max_relative_pos) == 1
args.max_relative_pos = args.max_relative_pos * args.nlayers
self.embedder = Embedder(args)
self.encoder = Encoder(args, self.embedder.enc_input_size)
self.decoder = Decoder(args, self.embedder.dec_input_size)
self.layer_wise_attn = args.layer_wise_attn
self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size)
if args.share_decoder_embeddings:
if self.embedder.use_tgt_word:
assert args.emsize == self.decoder.input_size
self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight
self._copy = args.copy_attn
if self._copy:
self.copy_attn = GlobalAttention(dim=self.decoder.input_size,
attn_type=args.attn_type)
self.copy_generator = CopyGenerator(self.decoder.input_size,
tgt_dict,
self.generator)
self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict),
force_copy=args.force_copy)
else:
self.criterion = nn.CrossEntropyLoss(reduction='none')
def _run_forward_ml(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs):
batch_size = code_len.size(0)
# embed and encode the source sequence
code_rep = self.embedder(code_word_rep,
code_char_rep,
code_type_rep,
mode='encoder')
memory_bank, layer_wise_outputs = self.encoder(code_rep, code_len) # B x seq_len x h
# embed and encode the target sequence
summ_emb = self.embedder(summ_word_rep,
summ_char_rep,
mode='decoder')
summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1))
enc_outputs = layer_wise_outputs if self.layer_wise_attn else memory_bank
layer_wise_dec_out, attns = self.decoder(enc_outputs,
code_len,
summ_pad_mask,
summ_emb)
decoder_outputs = layer_wise_dec_out[-1]
loss = dict()
target = tgt_seq[:, 1:].contiguous()
if self._copy:
# copy_score: batch_size, tgt_len, src_len
_, copy_score, _ = self.copy_attn(decoder_outputs,
memory_bank,
memory_lengths=code_len,
softmax_weights=False)
# mask copy_attn weights here if needed
if kwargs['code_mask_rep'] is not None:
mask = kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it broadcastable.
copy_score.data.masked_fill_(mask, -float('inf'))
attn_copy = f.softmax(copy_score, dim=-1)
scores = self.copy_generator(decoder_outputs, attn_copy, src_map)
scores = scores[:, :-1, :].contiguous()
ml_loss = self.criterion(scores,
alignment[:, 1:].contiguous(),
target)
else:
scores = self.generator(decoder_outputs) # `batch x tgt_len x vocab_size`
scores = scores[:, :-1, :].contiguous() # `batch x tgt_len - 1 x vocab_size`
ml_loss = self.criterion(scores.view(-1, scores.size(2)),
target.view(-1))
ml_loss = ml_loss.view(*scores.size()[:-1])
ml_loss = ml_loss.mul(target.ne(constants.PAD).float())
ml_loss = ml_loss.sum(1) * kwargs['example_weights']
loss['ml_loss'] = ml_loss.mean()
loss['loss_per_token'] = ml_loss.div((summ_len - 1).float()).mean()
return loss
def forward(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs):
"""
Input:
- code_word_rep: ``(batch_size, max_doc_len)``
- code_char_rep: ``(batch_size, max_doc_len, max_word_len)``
- code_len: ``(batch_size)``
- summ_word_rep: ``(batch_size, max_que_len)``
- summ_char_rep: ``(batch_size, max_que_len, max_word_len)``
- summ_len: ``(batch_size)``
- tgt_seq: ``(batch_size, max_len)``
Output:
- ``(batch_size, P_LEN)``, ``(batch_size, P_LEN)``
"""
if self.training:
return self._run_forward_ml(code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs)
else:
return self.decode(code_word_rep,
code_char_rep,
code_type_rep,
code_len,
src_map,
alignment,
**kwargs)
def __tens2sent(self,
t,
tgt_dict,
src_vocabs):
words = []
for idx, w in enumerate(t):
widx = w[0].item()
if widx < len(tgt_dict):
words.append(tgt_dict[widx])
else:
widx = widx - len(tgt_dict)
words.append(src_vocabs[idx][widx])
return words
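# e.g. with a hypothetical len(tgt_dict) == 50000, a predicted index of
# 50003 selects entry 3 of that example's source-side (copy) vocabulary.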
def __generate_sequence(self,
params,
choice='greedy',
tgt_words=None):
batch_size = params['memory_bank'].size(0)
use_cuda = params['memory_bank'].is_cuda
if tgt_words is None:
tgt_words = torch.LongTensor([constants.BOS])
if use_cuda:
tgt_words = tgt_words.cuda()
tgt_words = tgt_words.expand(batch_size).unsqueeze(1) # B x 1
tgt_chars = None
if self.embedder.use_tgt_char:
tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD)
tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0)
tgt_chars = tgt_chars.repeat(batch_size, 1)
tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1)
dec_preds = []
copy_info = []
attentions = []
dec_log_probs = []
acc_dec_outs = []
max_mem_len = params['memory_bank'][0].shape[1] \
if isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1]
dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len)
attns = {"coverage": None}
enc_outputs = params['layer_wise_outputs'] if self.layer_wise_attn \
else params['memory_bank']
# +1 for <EOS> token
for idx in range(params['max_len'] + 1):
tgt = self.embedder(tgt_words,
tgt_chars,
mode='decoder',
step=idx)
tgt_pad_mask = tgt_words.data.eq(constants.PAD)
layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask,
tgt,
enc_outputs,
dec_states,
step=idx,
layer_wise_coverage=attns['coverage'])
decoder_outputs = layer_wise_dec_out[-1]
acc_dec_outs.append(decoder_outputs.squeeze(1))
if self._copy:
_, copy_score, _ = self.copy_attn(decoder_outputs,
params['memory_bank'],
memory_lengths=params['src_len'],
softmax_weights=False)
# mask copy_attn weights here if needed
if params['src_mask'] is not None:
mask = params['src_mask'].byte().unsqueeze(1) # Make it broadcastable.
copy_score.data.masked_fill_(mask, -float('inf'))
attn_copy = f.softmax(copy_score, dim=-1)
prediction = self.copy_generator(decoder_outputs,
attn_copy,
params['src_map'])
prediction = prediction.squeeze(1)
for b in range(prediction.size(0)):
if params['blank'][b]:
blank_b = torch.LongTensor(params['blank'][b])
fill_b = torch.LongTensor(params['fill'][b])
if use_cuda:
blank_b = blank_b.cuda()
fill_b = fill_b.cuda()
prediction[b].index_add_(0, fill_b,
prediction[b].index_select(0, blank_b))
prediction[b].index_fill_(0, blank_b, 1e-10)
else:
prediction = self.generator(decoder_outputs.squeeze(1))
prediction = f.softmax(prediction, dim=1)
if choice == 'greedy':
tgt_prob, tgt = torch.max(prediction, dim=1, keepdim=True)
log_prob = torch.log(tgt_prob + 1e-20)
elif choice == 'sample':
tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1))
else:
raise ValueError('Unknown decoding choice: %s' % choice)
dec_log_probs.append(log_prob.squeeze(1))
dec_preds.append(tgt.squeeze(1).clone())
if "std" in attns:
# std_attn: batch_size x num_heads x 1 x src_len
std_attn = torch.stack(attns["std"], dim=1)
attentions.append(std_attn.squeeze(2))
if self._copy:
mask = tgt.gt(len(params['tgt_dict']) - 1)
copy_info.append(mask.float().squeeze(1))
words = self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab'])
tgt_chars = None
if self.embedder.use_tgt_char:
tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist() for w in words]
tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1)
words = [params['tgt_dict'][w] for w in words]
words = torch.Tensor(words).type_as(tgt)
tgt_words = words.unsqueeze(1)
return dec_preds, attentions, copy_info, dec_log_probs
def decode(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
src_map,
alignment,
**kwargs):
word_rep = self.embedder(code_word_rep,
code_char_rep,
code_type_rep,
mode='encoder')
memory_bank, layer_wise_outputs = self.encoder(word_rep, code_len) # B x seq_len x h
params = dict()
params['memory_bank'] = memory_bank
params['layer_wise_outputs'] = layer_wise_outputs
params['src_len'] = code_len
params['source_vocab'] = kwargs['source_vocab']
params['src_map'] = src_map
params['src_mask'] = kwargs['code_mask_rep']
params['fill'] = kwargs['fill']
params['blank'] = kwargs['blank']
params['src_dict'] = kwargs['src_dict']
params['tgt_dict'] = kwargs['tgt_dict']
params['max_len'] = kwargs['max_len']
params['src_words'] = code_word_rep
dec_preds, attentions, copy_info, _ = self.__generate_sequence(params, choice='greedy')
dec_preds = torch.stack(dec_preds, dim=1)
copy_info = torch.stack(copy_info, dim=1) if copy_info else None
# attentions: batch_size x tgt_len x num_heads x src_len
attentions = torch.stack(attentions, dim=1) if attentions else None
return {
'predictions': dec_preds,
'copy_info': copy_info,
'memory_bank': memory_bank,
'attentions': attentions
}
def count_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def count_encoder_parameters(self):
return self.encoder.count_parameters()
def count_decoder_parameters(self):
return self.decoder.count_parameters()
def layer_wise_parameters(self):
table = PrettyTable()
table.field_names = ["Layer Name", "Output Shape", "Param #"]
table.align["Layer Name"] = "l"
table.align["Output Shape"] = "r"
table.align["Param #"] = "r"
for name, parameters in self.named_parameters():
if parameters.requires_grad:
table.add_row([name, str(list(parameters.shape)), parameters.numel()])
return table
| 42.334913
| 98
| 0.504926
| 2,930
| 26,798
| 4.305802
| 0.104096
| 0.026078
| 0.008561
| 0.002536
| 0.394499
| 0.336002
| 0.287254
| 0.245244
| 0.233672
| 0.212429
| 0
| 0.00806
| 0.412008
| 26,798
| 632
| 99
| 42.401899
| 0.7926
| 0.048996
| 0
| 0.382409
| 0
| 0
| 0.02635
| 0
| 0
| 0
| 0
| 0
| 0.013384
| 1
| 0.038241
| false
| 0
| 0.024857
| 0.007648
| 0.107075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a4c4690f289d0da27d1fd0d344a2302e88669f6
| 3,344
|
py
|
Python
|
cattle/plugins/docker/delegate.py
|
cjellick/python-agent
|
6991369e309d050a43cba770df6e8ddd758f671d
|
[
"Apache-2.0"
] | 8
|
2015-07-20T15:29:25.000Z
|
2018-06-27T13:30:13.000Z
|
cattle/plugins/docker/delegate.py
|
cjellick/python-agent
|
6991369e309d050a43cba770df6e8ddd758f671d
|
[
"Apache-2.0"
] | 47
|
2015-07-13T23:47:35.000Z
|
2020-07-31T16:06:34.000Z
|
cattle/plugins/docker/delegate.py
|
cjellick/python-agent
|
6991369e309d050a43cba770df6e8ddd758f671d
|
[
"Apache-2.0"
] | 21
|
2015-08-21T01:58:47.000Z
|
2021-01-24T11:59:25.000Z
|
import logging
from cattle import Config
from cattle.utils import reply, popen
from .compute import DockerCompute
from cattle.agent.handler import BaseHandler
from cattle.progress import Progress
from cattle.type_manager import get_type, MARSHALLER
from . import docker_client
import subprocess
import os
import time
log = logging.getLogger('docker')
def ns_exec(pid, event):
script = os.path.join(Config.home(), 'events', event.name.split(';')[0])
cmd = ['nsenter',
'-F',
'-m',
'-u',
'-i',
'-n',
'-p',
'-t', str(pid),
'--', script]
marshaller = get_type(MARSHALLER)
event_input = marshaller.to_string(event)
data = None
env = {}
with open('/proc/{}/environ'.format(pid)) as f:
for line in f.read().split('\0'):
if not len(line):
continue
kv = line.split('=', 1)
if kv[0].startswith('CATTLE'):
env[kv[0]] = kv[1]
env['PATH'] = os.environ['PATH']
env['CATTLE_CONFIG_URL'] = Config.config_url()
for i in range(3):
p = popen(cmd,
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, error = p.communicate(input=event_input)
retcode = p.poll()
if retcode == 0:
break
exists_cmd = cmd[:-1] + ['/usr/bin/test', '-e', script]
if popen(exists_cmd, env=env).wait() == 0:
break
# Sleep and try again if missing
time.sleep(1)
if retcode:
return retcode, output, None
text = []
for line in output.splitlines():
if line.startswith('{'):
data = marshaller.from_string(line)
break
text.append(line)
return retcode, ''.join(text), data
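# For a hypothetical pid 1234 and event name 'config.update;reply', the
# command built above is roughly:
#   nsenter -F -m -u -i -n -p -t 1234 -- <Config.home()>/events/config.update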
class DockerDelegate(BaseHandler):
def __init__(self):
self.compute = DockerCompute()
def events(self):
return ['delegate.request']
def delegate_request(self, req=None, event=None, instanceData=None, **kw):
if instanceData.kind != 'container' or \
instanceData.get('token') is None:
return
container = self.compute.get_container(docker_client(), instanceData,
by_agent=True)
if container is None:
log.info('Can not call [%s], container does not exist',
instanceData.uuid)
return
inspect = self.compute.inspect(container)
try:
running = inspect['State']['Running']
if not running:
log.error('Can not call [%s], container is not running',
instanceData.uuid)
return
except KeyError:
log.error('Can not call [%s], container is not running',
instanceData.uuid)
return
progress = Progress(event, parent=req)
exit_code, output, data = ns_exec(inspect['State']['Pid'], event)
if exit_code == 0:
return reply(event, data, parent=req)
else:
progress.update('Update failed', data={
'exitCode': exit_code,
'output': output
})
| 27.636364
| 78
| 0.535287
| 366
| 3,344
| 4.825137
| 0.36612
| 0.028313
| 0.016988
| 0.018686
| 0.08154
| 0.070215
| 0.070215
| 0.070215
| 0.070215
| 0.070215
| 0
| 0.005479
| 0.345096
| 3,344
| 120
| 79
| 27.866667
| 0.800913
| 0.008971
| 0
| 0.126316
| 0
| 0
| 0.093297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042105
| false
| 0.010526
| 0.115789
| 0.010526
| 0.252632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6a4da8a95b67b63d32309af5c23df6977103484a
| 6,391
|
py
|
Python
|
bitraider/strategy.py
|
ehickox2012/bitraider
|
dcc695b93dc1c22415780e3f5ff9f7ee29d6988c
|
[
"MIT"
] | 2
|
2015-03-05T22:28:43.000Z
|
2015-03-12T23:07:54.000Z
|
bitraider/strategy.py
|
ehickox/bitraider
|
dcc695b93dc1c22415780e3f5ff9f7ee29d6988c
|
[
"MIT"
] | 2
|
2015-04-05T21:13:59.000Z
|
2015-04-05T21:16:05.000Z
|
bitraider/strategy.py
|
ehickox/bitraider
|
dcc695b93dc1c22415780e3f5ff9f7ee29d6988c
|
[
"MIT"
] | 1
|
2015-08-16T18:53:00.000Z
|
2015-08-16T18:53:00.000Z
|
import sys
import pytz
#import xml.utils.iso8601
import time
import numpy
from datetime import date, datetime, timedelta
from matplotlib import pyplot as plt
from exchange import cb_exchange as cb_exchange
from exchange import CoinbaseExchangeAuth
from abc import ABCMeta, abstractmethod
class strategy(object):
"""`strategy` defines an abstract base strategy class. The minimum required to create a strategy is a file with a class that inherits from strategy and implements the abstract trade method. As a bonus, strategy includes utility functions like calculate_historic_data.
"""
__metaclass__ = ABCMeta
def __init__(self, name="default name", interval=5):
"""Constructor for an abstract strategy. You can modify it as needed.
\n`interval`: a.k.a. timeslice; the amount of time in seconds for each 'tick' (default is 5)
\n`name`: a string name for the strategy
"""
self.name = name
self.interval = interval
self.times_recalculated = 0
@abstractmethod
def trade(self, timeslice):
"""Perform operations on a timeslice.
\n`timeslice`: a section of trade data with time length equal to the strategy's interval, formatted as follows:
\n[time, low, high, open, close, volume]
"""
return
def backtest_strategy(self, historic_data):
"""Returns performance of a strategy vs market performance.
"""
# Reverse the data, since Coinbase returns it in reverse chronological order;
# now historic_data starts with the oldest entry
historic_data = list(reversed(historic_data))
earliest_time = float(historic_data[0][0])
latest_time = float(historic_data[-1][0])
start_price = float(historic_data[0][4])
end_price = float(historic_data[-1][4])
market_performance = ((end_price-start_price)/start_price)*100
print("Running simulation on historic data. This may take some time....")
for idx, timeslice in enumerate(historic_data):
# Display what percent through the data we are
percent = (float(idx)/float(len(historic_data)))*100 + 1
sys.stdout.write("\r%d%%" % percent)
sys.stdout.flush()
self.trade(timeslice)
# Calculate performance
end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc)
end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal)
start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc)
strategy_performance = ((end_amt-start_amt)/start_amt)*100
print("\n")
print("Times recalculated: "+str(self.times_recalculated))
print("Times bought: "+str(self.exchange.times_bought))
print("Times sold: "+str(self.exchange.times_sold))
print("The Market's performance: "+str(market_performance)+" %")
print("Strategy's performance: "+str(strategy_performance)+" %")
print("Account's ending value if no trades were made: "+str(end_amt_no_trades)+" BTC")
print("Account's ending value with this strategy: "+str(end_amt)+" BTC")
strategy_performance_vs_market = strategy_performance - market_performance
if strategy_performance > market_performance:
print("Congratulations! This strategy has beat the market by: "+str(strategy_performance_vs_market)+" %")
elif strategy_performance < market_performance:
print("This strategy has performed: "+str(strategy_performance_vs_market)+" % worse than the market.")
return strategy_performance_vs_market, strategy_performance, market_performance
@staticmethod
def calculate_historic_data(data, pivot):
"""Returns average price weighted according to volume, and the number of bitcoins traded
above and below a price point, called a pivot.\n
\npivot: the price used for returning volume above and below
\ndata: a list of lists formatted as follows [time, low, high, open, close, volume]
\n[
\n\t["2014-11-07 22:19:28.578544+00", "0.32", "4.2", "0.35", "4.2", "12.3"],
\n\t\t...
\n]
"""
price_list = []
weights = []
if data is None:
raise ValueError("data must not be None")
min_price = float(data[0][1])
max_price = float(data[0][2])
discrete_prices = {}
for timeslice in data:
timeslice = [float(i) for i in timeslice]
if max_price < timeslice[2]:
max_price = timeslice[2]
if min_price > timeslice[1]:
min_price = timeslice[1]
closing_price = timeslice[4]
volume = timeslice[5]
if str(closing_price) not in discrete_prices:
discrete_prices[str(closing_price)] = volume
else:
discrete_prices[str(closing_price)] += volume
price_list.append(closing_price)
weights.append(volume)
fltprices = [float(i) for i in discrete_prices.keys()]
fltvolumes = [float(i) for i in discrete_prices.values()]
np_discrete_prices = numpy.array(fltprices)
np_volume_per_price = numpy.array(fltvolumes)
weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price)
num_above = 0
num_below = 0
num_at = 0
for key in discrete_prices.keys():
value = discrete_prices[key]
if float(key) > pivot:
num_above+=value
elif float(key) < pivot:
num_below+=value
elif float(key) == pivot:
num_at+=value
total_volume = 0.0
for volume in fltvolumes:
total_volume+=volume
fltprops = []
for volume in fltvolumes:
fltprops.append((volume/total_volume))
#print("num_below: "+str(num_below))
#print("num_above: "+str(num_above))
#print("num_at: "+str(num_at))
#print("weighted_average: "+str(weighted_avg))
#plt.title("Price distribution")
#plt.xlabel("Price (USD)")
#plt.ylabel("Volume")
#plt.bar(fltprices, fltprops)
#plt.show()
return weighted_avg, num_above, num_below
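# Example usage with hypothetical rows in the documented format (note that
# the time column must be numeric for the float() conversion above):
# data = [["1415398768", "0.32", "4.2", "0.35", "4.2", "12.3"], ...]
# weighted_avg, above, below = strategy.calculate_historic_data(data, pivot=1.0)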
| 42.324503
| 267
| 0.636833
| 801
| 6,391
| 4.913858
| 0.283396
| 0.042683
| 0.025915
| 0.022358
| 0.158283
| 0.096799
| 0.045224
| 0.032012
| 0
| 0
| 0
| 0.01592
| 0.26287
| 6,391
| 150
| 268
| 42.606667
| 0.819571
| 0.264434
| 0
| 0.020619
| 0
| 0
| 0.083315
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041237
| false
| 0.010309
| 0.092784
| 0
| 0.185567
| 0.113402
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbde2669ec80772673d5f19711266d806c399444
| 7,303
|
py
|
Python
|
biggan_discovery/orojar_discover.py
|
andreasjansson/OroJaR
|
ebb8c0333bbd33c063b6dd4a21a0559eb86d13e9
|
[
"BSD-2-Clause"
] | 47
|
2021-07-26T07:54:06.000Z
|
2022-02-07T16:37:40.000Z
|
biggan_discovery/orojar_discover.py
|
andreasjansson/OroJaR
|
ebb8c0333bbd33c063b6dd4a21a0559eb86d13e9
|
[
"BSD-2-Clause"
] | 1
|
2021-09-14T07:26:15.000Z
|
2021-09-14T07:45:59.000Z
|
biggan_discovery/orojar_discover.py
|
andreasjansson/OroJaR
|
ebb8c0333bbd33c063b6dd4a21a0559eb86d13e9
|
[
"BSD-2-Clause"
] | 7
|
2021-08-21T07:33:35.000Z
|
2022-03-16T23:21:29.000Z
|
"""
Learns a matrix of Z-Space directions using a pre-trained BigGAN Generator.
Modified from train.py in the PyTorch BigGAN repo.
"""
import os
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim
import utils
import train_fns
from sync_batchnorm import patch_replication_callback
from torch.utils.tensorboard import SummaryWriter
from orojar import orojar
from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G
from layers import fast_gram_schmidt, norm
class DataParallelLoss(nn.Module):
"""
This is simply a wrapper class to compute the OroJaR efficiently over several GPUs
"""
def __init__(self, G):
super(DataParallelLoss, self).__init__()
self.G = G
def forward(self, z, y, w, Q):
penalty = orojar(self.G, z, c=y, w=w, G_z=None, Q=Q, multiple_layers=False)
return penalty
# The main training file. Config is a dictionary specifying the configuration
# of this training run.
def run(config):
if config['wandb_entity'] is not None:
init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet')
if config["G_path"] is None: # Download a pre-trained G if necessary
download_G()
config["G_path"] = 'checkpoints/138k'
G, state_dict, device, experiment_name = load_G(config)
# If parallel, parallelize the GD module
if config['parallel']:
G = nn.DataParallel(DataParallelLoss(G))
if config['cross_replica']:
patch_replication_callback(G)
num_gpus = torch.cuda.device_count()
print(f'Using {num_gpus} GPUs')
# If search_space != 'all', then we need to pad the z components that we are leaving alone:
pad = get_direction_padding_fn(config)
direction_size = config['dim_z'] if config['search_space'] == 'all' else config['ndirs']
# A is our (ndirs, |z|) matrix of directions, where ndirs indicates the number of directions we want to learn
if config['load_A'] == 'coords':
print('Initializing with standard basis directions')
A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True)
elif config['load_A'] == 'random':
print('Initializing with random directions')
A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True)
torch.nn.init.kaiming_normal_(A)
else:
raise NotImplementedError
# We only learn A; G is left frozen during training:
optim = torch.optim.Adam(params=[A], lr=config['A_lr'])
# Allow for different batch sizes in G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
z_, y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'],
device=device, fp16=config['G_fp16'])
# Prepare a fixed z & y to see individual sample evolution throughout training
fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z,
config['n_classes'], device=device,
fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
interp_z, interp_y = utils.prepare_z_y(config["n_samples"], G.module.G.dim_z,
config['n_classes'], device=device,
fp16=config['G_fp16'])
interp_z.sample_()
interp_y.sample_()
if config['fix_class'] is not None:
y_ = y_.new_full(y_.size(), config['fix_class'])
fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class'])
interp_y = interp_y.new_full(interp_y.size(), config['fix_class'])
print('Beginning training at epoch %d...' % state_dict['epoch'])
# Train for specified number of epochs, although we mostly track G iterations.
iters_per_epoch = 1000
dummy_loader = [None] * iters_per_epoch # We don't need any real data
path_size = config['path_size']
# Simply stores a |z|-dimensional one-hot vector indicating each direction we are learning:
direction_indicators = torch.eye(config['ndirs']).to(device)
G.eval()
G.module.optim = optim
writer = SummaryWriter('%s/%s' % (config['logs_root'], experiment_name))
sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name)
writer.add_image('samples', sample_sheet, 0)
interp_y_ = G.module.G.shared(interp_y)
norm_fn = norm
# Make directions orthonormal via Gram-Schmidt followed by a normalization:
Q = pad(norm_fn(fast_gram_schmidt(A))) if not config["no_ortho"] else pad(A)
if config["vis_during_training"]:
print("Generating initial visualizations...")
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q,
high_quality=False, npv=1)
for w_ix in range(config['ndirs']):
writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 0, fps=24)
for epoch in range(state_dict['epoch'], config['num_epochs']):
if config['pbar'] == 'mine':
pbar = utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta')
else:
pbar = tqdm(dummy_loader)
for i, _ in enumerate(pbar):
state_dict['itr'] += 1
z_.sample_()
if config['fix_class'] is None:
y_.sample_()
y = G.module.G.shared(y_)
# OroJaR taken w.r.t. w_sampled, NOT z:
w = torch.zeros((G_batch_size, config['ndirs'])) # equal to the one-hot w
penalty = G(z_, y, w=w, Q=Q.repeat(num_gpus, 1)).mean()
optim.zero_grad()
penalty.backward()
optim.step()
# re-orthogonalize A for visualizations and the next training iteration:
Q = pad(norm_fn(fast_gram_schmidt(A))) if not config["no_ortho"] else pad(A)
# Log metrics to TensorBoard/WandB:
cur_training_iter = epoch * iters_per_epoch + i
writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter)
writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter)
# Save directions and log visuals:
if not (state_dict['itr'] % config['save_every']):
torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' %
(config['weights_root'], experiment_name, cur_training_iter))
if config["vis_during_training"]:
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q,
high_quality=False, npv=1)
for w_ix in range(config['ndirs']):
writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], cur_training_iter, fps=24)
state_dict['epoch'] += 1
def main():
# parse command line and run
parser = utils.prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main()
| 41.259887
| 113
| 0.633301
| 1,007
| 7,303
| 4.365442
| 0.285005
| 0.021838
| 0.016379
| 0.009554
| 0.223157
| 0.188581
| 0.163103
| 0.163103
| 0.163103
| 0.139445
| 0
| 0.007323
| 0.252088
| 7,303
| 176
| 114
| 41.494318
| 0.79751
| 0.169656
| 0
| 0.136752
| 0
| 0
| 0.119874
| 0.007305
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034188
| false
| 0
| 0.102564
| 0
| 0.153846
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbde9d29ec27efc6184d1b64557b595e4c3e0755
| 6,837
|
py
|
Python
|
packer/resources/bootstrap_node.py
|
VIOOH/nile
|
893802387b3891ea02aae05f39ff4aa051354f18
|
[
"Apache-2.0"
] | 4
|
2021-07-09T15:55:04.000Z
|
2021-12-28T10:34:12.000Z
|
packer/resources/bootstrap_node.py
|
Kishore88/nile
|
893802387b3891ea02aae05f39ff4aa051354f18
|
[
"Apache-2.0"
] | null | null | null |
packer/resources/bootstrap_node.py
|
Kishore88/nile
|
893802387b3891ea02aae05f39ff4aa051354f18
|
[
"Apache-2.0"
] | 3
|
2021-07-09T15:55:09.000Z
|
2021-07-10T10:24:02.000Z
|
#!/usr/bin/env python3
import os
import re
import glob
import boto3
import requests
import subprocess
from time import sleep
AWS_REGION = os.environ['AWS_REGION']
DEPLOY_UUID = os.environ['DEPLOY_UUID']
SERVICE_NAME = os.environ['SERVICE_NAME']
MOUNT_POINT = "/var/lib/" + SERVICE_NAME
NIC_IP = os.environ['NIC_IP']
TAG_KEY = os.environ['TAG_KEY']
def retrieve_eni_ids():
ec2 = boto3.resource('ec2')
enis = []
for eni in ec2.network_interfaces.all():
for tag in eni.tag_set:
if tag['Key'] == TAG_KEY:
if tag['Value'] == DEPLOY_UUID:
enis.append(eni.network_interface_id)
return enis if len(enis) > 0 else None
def attach_eni_ids():
c_ec2 = boto3.client('ec2')
r_ec2 = boto3.resource('ec2')
i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text
eni_ids = retrieve_eni_ids()
device_number = len(r_ec2.Instance(i_id).network_interfaces) + 1
for eni_id in eni_ids:
c_ec2.attach_network_interface(DeviceIndex=device_number, InstanceId=i_id, NetworkInterfaceId=eni_id)
def retrieve_ebs_ids():
ec2 = boto3.resource('ec2')
ebss = []
for volume in ec2.volumes.all():
if volume.tags is not None:
for tag in volume.tags:
if tag['Key'] == TAG_KEY:
if tag['Value'] == DEPLOY_UUID:
ebss.append(volume.volume_id)
return ebss if len(ebss) > 0 else None
def attach_ebs():
ec2 = boto3.client('ec2')
i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text
volume_ids = retrieve_ebs_ids()
i = 0
device_char = 'z'
while i < len(volume_ids):
v_id = volume_ids[i]
device = '/dev/xvd{0}'.format(device_char)
ec2.attach_volume(Device=device, InstanceId=i_id, VolumeId=v_id)
# Wait to ensure device is attached
sleep(3)
if not check_ebs(v_id):
prepare_ebs(v_id)
add_fstab_entries(v_id, MOUNT_POINT)
p_mount = subprocess.Popen('mount -a'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_mount.communicate()
p_chown = subprocess.Popen('chown -R {0}:{0} {1}'.format(SERVICE_NAME, MOUNT_POINT).split(),
stdout=subprocess.PIPE)
stdout, stderr = p_chown.communicate()
device_char = chr(ord(device_char) - 1)
i += 1
def check_ebs(volume_id):
v_id = volume_id.replace('vol-', 'vol')
pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)
return bool(len(glob.glob(pattern)))
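# e.g. a hypothetical attached volume 'vol-0abc123', once partitioned, shows
# up as /dev/disk/by-id/<bus-id>-vol0abc123-part1, which the glob matches.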
def prepare_ebs(volume_id):
v_id = volume_id.replace('vol-', 'vol')
pattern = '/dev/disk/by-id/*{0}'.format(v_id)
device = glob.glob(pattern)[0]
gdisk_commands = '\n'.join([
'n',
'1',
'34',
'',
'',
'w',
'Y',
''
])
p_echo = subprocess.Popen('echo -ne {0}'.format(gdisk_commands).split(' '), stdout=subprocess.PIPE)
p_fdisk = subprocess.Popen('gdisk {0}'.format(device).split(), stdin=p_echo.stdout, stdout=subprocess.PIPE)
stdout, stderr = p_fdisk.communicate()
print(stdout)
print(stderr)
# p_partprobe = subprocess.Popen('partprobe'.split(' '), stdout=subprocess.PIPE)
# stdout, stderr = p_partprobe.communicate()
# print(stdout)
# print(stderr)
sleep(3)
pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)
partition = glob.glob(pattern)[0]
p_xfs = subprocess.Popen('mkfs.xfs {0}'.format(partition).split(), stdout=subprocess.PIPE)
stdout, stderr = p_xfs.communicate()
print(stdout)
print(stderr)
def add_fstab_entries(volume_id, mount_point):
v_id = volume_id.replace('vol-', 'vol')
pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)
partition = glob.glob(pattern)[0]
fstab_entries = [
mount_point,
'xfs',
'defaults',
'0',
'0'
]
with open('/etc/fstab', 'a') as f:
f.write('{0} {1}\n'.format(partition, ' '.join(fstab_entries)))
f.flush()
f.close()
def wait_device_ready(timeout=3):
c = 0
while c < timeout:
sleep(1)
p_ip = subprocess.Popen('ip a'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_ip.communicate()
for line in stdout.decode().splitlines():
res = re.match('.*inet {0}/[0-9]{{2}}'.format(NIC_IP), line)
if res is not None:
return None
c += 1
raise Exception('Device with address {0} not ready'.format(NIC_IP))
def change_default_route():
wait_device_ready(10)
p_ip = subprocess.Popen('ip r'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_ip.communicate()
r_subnet_rules = []
for line in stdout.decode().splitlines():
res = re.match('(.* ){2}eth[0-9](?! $).*', line)
if res is not None:
subnet_rule = res.group(0)
l_subnet_rule = subnet_rule.split()
device = l_subnet_rule[2]
ip = l_subnet_rule[-1]
r_subnet_rules.append(
{
'device': device,
'ip': ip,
'subnet_rule': subnet_rule
}
)
r_default_route = ''
for line in stdout.decode().splitlines():
res = re.match('default .*', line)
if res is not None:
r_default_route = res.group(0)
break
with open('/etc/rc.local', 'a') as f:
f.write('#!/bin/bash\n\n')
rule_index = 128
default_route_device = ''
for rule in r_subnet_rules:
default_route = re.sub('eth.', rule['device'], r_default_route)
f.write('ip rule add from {0} table {1}\n'.format(rule['ip'], rule_index))
f.write('ip r add {0} table {1}\n'.format(default_route, rule_index))
f.write('ip r add {0} table {1}\n\n'.format(rule['subnet_rule'], rule_index))
if rule['ip'] == NIC_IP:
default_route_device = rule['device']
rule_index += 1
default_route = re.sub('eth.', default_route_device, r_default_route)
f.write('ip r del default\n')
f.write('ip r add {0}\n\n'.format(default_route))
f.write('exit 0\n')
f.flush()
f.close()
os.chmod('/etc/rc.local', 0o0755)
p_rc_local = subprocess.Popen('/etc/rc.local'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_rc_local.communicate()
if __name__ == '__main__':
boto3.setup_default_session(region_name=AWS_REGION)
# uses: DEPLOY_UUID, TAG_KEY
attach_eni_ids()
# uses: MOUNT_POINT, SERVICE_NAME, DEPLOY_UUID, TAG_KEY
attach_ebs()
# uses: NIC_IP
change_default_route()
| 27.130952
| 111
| 0.584321
| 928
| 6,837
| 4.104526
| 0.192888
| 0.040956
| 0.047257
| 0.052507
| 0.373851
| 0.283802
| 0.257548
| 0.203465
| 0.192964
| 0.133893
| 0
| 0.022873
| 0.271025
| 6,837
| 251
| 112
| 27.239044
| 0.741372
| 0.043733
| 0
| 0.213018
| 0
| 0
| 0.116404
| 0.011947
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053254
| false
| 0
| 0.04142
| 0
| 0.118343
| 0.023669
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbdf10789c79bc37376b7fcca6ae9a0b284ccf83
| 4,412
|
py
|
Python
|
parsers/srum_parser.py
|
otoriocyber/Chronos
|
d70e22afed723c0ad4b7e449bd253e15351bada6
|
[
"MIT"
] | 12
|
2021-04-20T23:08:28.000Z
|
2022-02-18T01:23:42.000Z
|
parsers/srum_parser.py
|
otoriocyber/chronos
|
d70e22afed723c0ad4b7e449bd253e15351bada6
|
[
"MIT"
] | null | null | null |
parsers/srum_parser.py
|
otoriocyber/chronos
|
d70e22afed723c0ad4b7e449bd253e15351bada6
|
[
"MIT"
] | null | null | null |
import csv
import datetime
import random
import os
from parsers.parser_base import ParserBase
FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1)
FILE_TIME_MICROSECOND = 10
def filetime_to_epoch_datetime(file_time):
if isinstance(file_time, int):
microseconds_since_file_time_epoch = file_time / FILE_TIME_MICROSECOND
else:
microseconds_since_file_time_epoch = int(file_time) / FILE_TIME_MICROSECOND
return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch)
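# FILETIME counts 100-nanosecond ticks since 1601-01-01, so dividing by 10
# yields microseconds. Sanity check: 116444736000000000 ticks (the offset
# between the 1601 and 1970 epochs) maps to datetime(1970, 1, 1).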
class SrumParser(ParserBase):
CSV_FIELDS = {
"Unknown1.csv": ["TimeStamp", "AppId", "UserId", "EndTime", "DurationMS"],
"Unknown2.csv": [],
"Unknown3.csv": [],
"Unknown4.csv": ["TimeStamp", "AppId", "UserId"],
"SruDbCheckpointTable.csv": [],
"SruDbIdMapTable.csv": [],
"Network Usage.csv": ["TimeStamp", "AppId", "UserId", "InterfaceLuid", "L2ProfileId", "BytesSent",
"BytesRecvd"],
"Network Connections.csv": [],
"Energy Usage.csv": [],
"Energy Usage(Long - Term).csv": [],
"Application Resources.csv": ["TimeStamp", "AppId", "UserId"],
"Application Resource Usage.csv": ["TimeStamp", "AppId", "UserId"]
}
PARSING_TOOL = r"Tools\ese-analyst-master\ese2csv.exe"
PARSE_COMMAND = "{parser_path} -o {output_path} -p srudb_plugin {srum_db} --plugin-args {software_hive}"
def __init__(self, temp, config):
super().__init__(config)
self.temp_result_path = temp
def parse(self, args):
srum_db, software_hive = args
output = r"{}\srum_{}".format(self.temp_result_path, random.randint(1, 1000000))
os.mkdir(output)
command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db,
software_hive=software_hive)
self._run_command(command)
for csv_file in os.listdir(output):
srum_records = []
full_path = os.path.join(output, csv_file)
headers = self.CSV_FIELDS.get(csv_file)
if not headers:
continue
if csv_file == "Unknown1.csv":
with open(full_path, "r") as f:
reader = csv.DictReader(f)
for line in reader:
cur_record = {}
endTime = line.get("EndTime")
duration = line.get("DurationMS")
if endTime and duration:
cur_record["time"] = filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat()
cur_record["EndTime"] = filetime_to_epoch_datetime(endTime).isoformat()
cur_record["DurationMS"] = duration
else:
cur_record["time"] = datetime.datetime(1970, 1, 1).isoformat()
cur_record["AppId"] = line.get("AppId")
cur_record["UserId"] = line.get("UserId")
srum_records.append(cur_record)
else:
with open(full_path, "r") as f:
reader = csv.DictReader(f)
for line in reader:
cur_record = {}
for header in headers:
if header == "TimeStamp":
cur_record["time"] = line.get("TimeStamp").replace(" ", "T")
line.pop("TimeStamp")
value = line.get(header)
if value:
if isinstance(value, bytes):
cur_record[header.lower().replace(" ", "_")] = value.decode()
elif str.isdigit(value):
cur_record[header.lower().replace(" ", "_")] = int(value)
else:
cur_record[header.lower().replace(" ", "_")] = value
else:
cur_record[header.lower().replace(" ", "_")] = ""
srum_records.append(cur_record)
self._write_results_list([("srum-{}".format(csv_file.split(".")[0].lower().replace(" ", "_")), srum_records)])
| 45.020408
| 122
| 0.522439
| 428
| 4,412
| 5.140187
| 0.299065
| 0.061364
| 0.029545
| 0.052273
| 0.232273
| 0.105455
| 0.090909
| 0.058182
| 0.058182
| 0.058182
| 0
| 0.010567
| 0.356528
| 4,412
| 97
| 123
| 45.484536
| 0.764354
| 0
| 0
| 0.178571
| 0
| 0.011905
| 0.147779
| 0.013599
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.059524
| 0
| 0.154762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbe088a01c052a1745bf75ba9a62254a5f03f63b
| 4,829
|
py
|
Python
|
track.py
|
AliabbasMerchant/fileTrackAndBackup
|
8cdf97be58c69061e1f60c08f89b524d91f8c17d
|
[
"MIT"
] | 6
|
2018-08-11T12:00:11.000Z
|
2021-06-15T09:11:34.000Z
|
track.py
|
AliabbasMerchant/fileTrackAndBackup
|
8cdf97be58c69061e1f60c08f89b524d91f8c17d
|
[
"MIT"
] | null | null | null |
track.py
|
AliabbasMerchant/fileTrackAndBackup
|
8cdf97be58c69061e1f60c08f89b524d91f8c17d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
from help import *
import time
# short-forms are used, so as to reduce the .json file size
# t : type - d or f
# d : directory
# f : file
# ts : timestamp
# dirs : The dictionary containing info about directory contents
# time : edit time of the file/folder
# s : size of the file/folder
# p : full path of the file/folder
# n : name of the main file/folder in the .json file
# i : info about the contents in the .json file
# folder = {'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/' + entity, 'time': get_time(stats), 'dirs': dir_dict}
# file = {'t': 'f', 's': stats.st_size, 'p': full_path + '/' + entity, 'time': get_time(stats)}
# info = {'t': 'd', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs': info}
# write = {'n': examine_name, 'ts': time.time(), 'i': info}
# info = {'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)}
# write = {'n': examine_name, 'ts': time.time(), 'i': info}
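# A tracked single file therefore serializes to something like this
# (hypothetical values):
# {"n": "notes.txt", "ts": 1533988811.0,
#  "i": {"t": "f", "s": 2048, "p": "/home/user/notes.txt", "time": 1533988800.0}}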
no_of_files = 0
no_of_dirs = 0
examine_name = ''
save_filename = ''
_base_path = None
_ignore = False
errors = []
def get_save_config(base_path: str) -> None:
global examine_name, save_filename
examine_name = base_path.strip().split('/')[-1]
save_filename = examine_name + '.json'
if not os.path.lexists(constants.save_folder_name):
execute_bash("mkdir " + constants.save_folder_name)
def get_info_dict(sub_path: str) -> dict:
global no_of_files, no_of_dirs, _base_path, _ignore, errors
full_path = _base_path + '/' + sub_path
full_path = full_path.strip()
if full_path.endswith('/'):
full_path = full_path[:-1]
edit_dict = dict()
try:
entity_list = os.listdir(full_path)
for entity in entity_list:
ignore_it = False
if _ignore and to_be_ignored(full_path + '/' + entity): # ignoring cache temp etc files
ignore_it = True
if not ignore_it:
try:
stats = os.stat(full_path + '/' + entity)
if not os.path.islink(full_path + '/' + entity):
if os.path.isdir(full_path + '/' + entity):
no_of_dirs += 1
new_sub_path = sub_path + '/' + entity
dir_dict = get_info_dict(new_sub_path)
edit_dict[entity] = {'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/' + entity,
'time': get_time(stats), 'dirs': dir_dict}
if os.path.isfile(full_path + '/' + entity):
no_of_files += 1
edit_dict[entity] = {'t': 'f', 's': stats.st_size, 'p': full_path + '/' + entity,
'time': get_time(stats)}
except FileNotFoundError:
errors.append(full_path + '/' + entity)
except PermissionError:
errors.append(full_path)
return edit_dict
def track(base_path: str, dir_path: str, output: bool = False, ignore: bool = False) -> list:
global _base_path, no_of_dirs, no_of_files, save_filename, _ignore, errors
no_of_dirs = 0
no_of_files = 0
print("Tracking...")
_base_path = base_path
_ignore = ignore
get_save_config(base_path)
if _ignore:
get_ignore_list()
if os.path.isdir(base_path):
info = get_info_dict('')
size = get_size(info)
no_of_dirs += 1
stats = os.stat(base_path)
info = {'t': 'd', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs': info}
write = {'n': examine_name, 'ts': time.time(), 'i': info}
write_to_json_file(write, constants.save_folder_name + "/" + save_filename)
if output:
print("Successfully analysed the folder " + base_path)
print("Found {} folder(s)".format(no_of_dirs))
print("Found {} file(s)".format(no_of_files))
print("The directory is of size {}".format(get_size_format(size)))
print("A detailed report can be found using the 'file_tb.py print [FILE/FOLDER]' command ")
else:
no_of_files += 1
stats = os.stat(base_path)
info = {'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)}
write = {'n': examine_name, 'ts': time.time(), 'i': info}
write_to_json_file(write, constants.save_folder_name + "/" + save_filename)
if output:
print("Successfully analysed the file")
print("The file is of size {}".format(get_size_format(stats.st_size)))
print("A detailed report can be found using the 'file_tb.py print [FILE/FOLDER]' command ")
# pp(info)
return errors
if __name__ == '__main__':
track(os.getcwd(), os.getcwd(), output=True)
| 40.923729
| 120
| 0.57631
| 658
| 4,829
| 3.971125
| 0.18845
| 0.058171
| 0.053578
| 0.048986
| 0.385763
| 0.355913
| 0.355913
| 0.335247
| 0.319939
| 0.319939
| 0
| 0.003178
| 0.283288
| 4,829
| 117
| 121
| 41.273504
| 0.751806
| 0.190516
| 0
| 0.238095
| 0
| 0
| 0.103368
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.02381
| 0
| 0.083333
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbe08be9b24ad6685aafe893f7f5de89e33519df
| 31,693
|
py
|
Python
|
clang/tools/scan-build-py/libscanbuild/analyze.py
|
Kvarnefalk/llvm-project
|
8b5f5798aaa24074609d151ea906d114cf5337c2
|
[
"Apache-2.0"
] | 1
|
2021-02-17T04:40:38.000Z
|
2021-02-17T04:40:38.000Z
|
clang/tools/scan-build-py/libscanbuild/analyze.py
|
Kvarnefalk/llvm-project
|
8b5f5798aaa24074609d151ea906d114cf5337c2
|
[
"Apache-2.0"
] | null | null | null |
clang/tools/scan-build-py/libscanbuild/analyze.py
|
Kvarnefalk/llvm-project
|
8b5f5798aaa24074609d151ea906d114cf5337c2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
""" This module implements the 'scan-build' command API.
Running the static analyzer against a build is done in multiple steps:
-- Intercept: capture the compilation command during the build,
-- Analyze: run the analyzer against the captured commands,
-- Report: create a cover report from the analyzer outputs. """
import re
import os
import os.path
import json
import logging
import multiprocessing
import tempfile
import functools
import subprocess
import contextlib
import datetime
import shutil
import glob
from collections import defaultdict
from libscanbuild import command_entry_point, compiler_wrapper, \
wrapper_environment, run_build, run_command, CtuConfig
from libscanbuild.arguments import parse_args_for_scan_build, \
parse_args_for_analyze_build
from libscanbuild.intercept import capture
from libscanbuild.report import document
from libscanbuild.compilation import split_command, classify_source, \
compiler_language
from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \
ClangErrorException
from libscanbuild.shell import decode
__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']
COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'
CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'
CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'
@command_entry_point
def scan_build():
""" Entry point for scan-build command. """
args = parse_args_for_scan_build()
# will re-assign the report directory as new output
with report_directory(
args.output, args.keep_empty, args.output_format) as args.output:
# Run against a build command. there are cases, when analyzer run
# is not required. But we need to set up everything for the
# wrappers, because 'configure' needs to capture the CC/CXX values
# for the Makefile.
if args.intercept_first:
# Run build command with intercept module.
exit_code = capture(args)
# Run the analyzer against the captured commands.
if need_analyzer(args.build):
govern_analyzer_runs(args)
else:
# Run build command and analyzer with compiler wrappers.
environment = setup_environment(args)
exit_code = run_build(args.build, env=environment)
# Cover report generation and bug counting.
number_of_bugs = document(args)
# Set exit status as it was requested.
return number_of_bugs if args.status_bugs else exit_code
@command_entry_point
def analyze_build():
""" Entry point for analyze-build command. """
args = parse_args_for_analyze_build()
# will re-assign the report directory as new output
with report_directory(args.output, args.keep_empty, args.output_format) as args.output:
# Run the analyzer against a compilation db.
govern_analyzer_runs(args)
# Cover report generation and bug counting.
number_of_bugs = document(args)
# Set exit status as it was requested.
return number_of_bugs if args.status_bugs else 0
def need_analyzer(args):
""" Check the intent of the build command.
When the static analyzer runs against a project's configure step, it should
be silent; there is no need to run the analyzer or generate a report.
Running `scan-build` against the configure step might still be necessary
when compiler wrappers are used: that is the moment when the build setup
checks the compiler and captures its location for the build process. """
return len(args) and not re.search(r'configure|autogen', args[0])
def prefix_with(constant, pieces):
""" From a sequence create another sequence where every second element
is from the original sequence and the odd elements are the prefix.
eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """
return [elem for piece in pieces for elem in [constant, piece]]
def get_ctu_config_from_args(args):
""" CTU configuration is created from the chosen phases and dir. """
return (
CtuConfig(collect=args.ctu_phases.collect,
analyze=args.ctu_phases.analyze,
dir=args.ctu_dir,
extdef_map_cmd=args.extdef_map_cmd)
if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd=''))
def get_ctu_config_from_json(ctu_conf_json):
""" CTU configuration is created from the chosen phases and dir. """
ctu_config = json.loads(ctu_conf_json)
# Recover namedtuple from json when coming from analyze-cc or analyze-c++
return CtuConfig(collect=ctu_config[0],
analyze=ctu_config[1],
dir=ctu_config[2],
extdef_map_cmd=ctu_config[3])
def create_global_ctu_extdef_map(extdef_map_lines):
""" Takes iterator of individual external definition maps and creates a
global map keeping only unique names. We leave conflicting names out of
CTU.
:param extdef_map_lines: Contains the id of a definition (mangled name) and
the originating source (the corresponding AST file) name.
:type extdef_map_lines: Iterator of str.
:returns: Mangled name - AST file pairs.
:rtype: List of (str, str) tuples.
"""
mangled_to_asts = defaultdict(set)
for line in extdef_map_lines:
mangled_name, ast_file = line.strip().split(' ', 1)
mangled_to_asts[mangled_name].add(ast_file)
mangled_ast_pairs = []
for mangled_name, ast_files in mangled_to_asts.items():
if len(ast_files) == 1:
mangled_ast_pairs.append((mangled_name, next(iter(ast_files))))
return mangled_ast_pairs
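# For example, the lines "c:@F@foo /tmp/a.ast", "c:@F@foo /tmp/b.ast" and
# "c:@F@bar /tmp/a.ast" yield only ('c:@F@bar', '/tmp/a.ast'): 'foo' maps
# to two ASTs, so it is left out of CTU.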
def merge_ctu_extdef_maps(ctudir):
""" Merge individual external definition maps into a global one.
As the collect phase runs parallel on multiple threads, all compilation
units are separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER.
These definition maps contain the mangled names and the source
(AST generated from the source) which had their definition.
These files should be merged at the end into a global map file:
CTU_EXTDEF_MAP_FILENAME."""
def generate_extdef_map_lines(extdefmap_dir):
""" Iterate over all lines of input files in a determined order. """
files = glob.glob(os.path.join(extdefmap_dir, '*'))
files.sort()
for filename in files:
with open(filename, 'r') as in_file:
for line in in_file:
yield line
def write_global_map(arch, mangled_ast_pairs):
""" Write (mangled name, ast file) pairs into final file. """
extern_defs_map_file = os.path.join(ctudir, arch,
CTU_EXTDEF_MAP_FILENAME)
with open(extern_defs_map_file, 'w') as out_file:
for mangled_name, ast_file in mangled_ast_pairs:
out_file.write('%s %s\n' % (mangled_name, ast_file))
triple_arches = glob.glob(os.path.join(ctudir, '*'))
for triple_path in triple_arches:
if os.path.isdir(triple_path):
triple_arch = os.path.basename(triple_path)
extdefmap_dir = os.path.join(ctudir, triple_arch,
CTU_TEMP_DEFMAP_FOLDER)
extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)
mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)
write_global_map(triple_arch, mangled_ast_pairs)
# Remove all temporary files
shutil.rmtree(extdefmap_dir, ignore_errors=True)
def run_analyzer_parallel(args):
""" Runs the analyzer against the given compilation database. """
def exclude(filename, directory):
""" Return true when any excluded directory prefix the filename. """
if not os.path.isabs(filename):
            # The filename is either absolute or relative to the directory.
            # Make it absolute since 'args.excludes' are absolute paths.
filename = os.path.normpath(os.path.join(directory, filename))
return any(re.match(r'^' + exclude_directory, filename)
for exclude_directory in args.excludes)
consts = {
'clang': args.clang,
'output_dir': args.output,
'output_format': args.output_format,
'output_failures': args.output_failures,
'direct_args': analyzer_params(args),
'force_debug': args.force_debug,
'ctu': get_ctu_config_from_args(args)
}
logging.debug('run analyzer against compilation database')
with open(args.cdb, 'r') as handle:
generator = (dict(cmd, **consts)
for cmd in json.load(handle) if not exclude(
cmd['file'], cmd['directory']))
        # When verbose output is requested, execute sequentially.
pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
for current in pool.imap_unordered(run, generator):
if current is not None:
# display error message from the static analyzer
for line in current['error_output']:
logging.info(line.rstrip())
pool.close()
pool.join()
def govern_analyzer_runs(args):
""" Governs multiple runs in CTU mode or runs once in normal mode. """
ctu_config = get_ctu_config_from_args(args)
# If we do a CTU collect (1st phase) we remove all previous collection
# data first.
if ctu_config.collect:
shutil.rmtree(ctu_config.dir, ignore_errors=True)
# If the user asked for a collect (1st) and analyze (2nd) phase, we do an
# all-in-one run where we deliberately remove collection data before and
# also after the run. If the user asks only for a single phase data is
# left so multiple analyze runs can use the same data gathered by a single
# collection run.
if ctu_config.collect and ctu_config.analyze:
        # The CTU string fields come from args.ctu_dir and extdef_map_cmd,
        # so they can be left empty here.
args.ctu_phases = CtuConfig(collect=True, analyze=False,
dir='', extdef_map_cmd='')
run_analyzer_parallel(args)
merge_ctu_extdef_maps(ctu_config.dir)
args.ctu_phases = CtuConfig(collect=False, analyze=True,
dir='', extdef_map_cmd='')
run_analyzer_parallel(args)
shutil.rmtree(ctu_config.dir, ignore_errors=True)
else:
# Single runs (collect or analyze) are launched from here.
run_analyzer_parallel(args)
if ctu_config.collect:
merge_ctu_extdef_maps(ctu_config.dir)
def setup_environment(args):
""" Set up environment for build command to interpose compiler wrapper. """
environment = dict(os.environ)
environment.update(wrapper_environment(args))
environment.update({
'CC': COMPILER_WRAPPER_CC,
'CXX': COMPILER_WRAPPER_CXX,
'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
'ANALYZE_BUILD_REPORT_DIR': args.output,
'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
})
return environment
@command_entry_point
def analyze_compiler_wrapper():
""" Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """
return compiler_wrapper(analyze_compiler_wrapper_impl)
def analyze_compiler_wrapper_impl(result, execution):
""" Implements analyzer compiler wrapper functionality. """
    # Don't run the analyzer when the compilation failed, or when it's not
    # requested.
if result or not os.getenv('ANALYZE_BUILD_CLANG'):
return
    # Check whether this is a compilation at all.
compilation = split_command(execution.cmd)
if compilation is None:
return
# collect the needed parameters from environment, crash when missing
parameters = {
'clang': os.getenv('ANALYZE_BUILD_CLANG'),
'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
'').split(' '),
'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
'directory': execution.cwd,
'command': [execution.cmd[0], '-c'] + compilation.flags,
'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
}
# call static analyzer against the compilation
for source in compilation.files:
parameters.update({'file': source})
logging.debug('analyzer parameters %s', parameters)
current = run(parameters)
# display error message from the static analyzer
if current is not None:
for line in current['error_output']:
logging.info(line.rstrip())
@contextlib.contextmanager
def report_directory(hint, keep, output_format):
""" Responsible for the report directory.
hint -- could specify the parent directory of the output directory.
keep -- a boolean value to keep or delete the empty report directory. """
stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
stamp = datetime.datetime.now().strftime(stamp_format)
parent_dir = os.path.abspath(hint)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)
logging.info('Report directory created: %s', name)
try:
yield name
finally:
if os.listdir(name):
if output_format != 'sarif':
# 'scan-view' currently does not support sarif format.
msg = "Run 'scan-view %s' to examine bug reports."
else:
msg = "View result at %s/results-merged.sarif."
keep = True
else:
if keep:
msg = "Report directory '%s' contains no report, but kept."
else:
msg = "Removing directory '%s' because it contains no report."
logging.warning(msg, name)
if not keep:
os.rmdir(name)
def analyzer_params(args):
""" A group of command line arguments can mapped to command
line arguments of the analyzer. This method generates those. """
result = []
if args.store_model:
result.append('-analyzer-store={0}'.format(args.store_model))
if args.constraints_model:
result.append('-analyzer-constraints={0}'.format(
args.constraints_model))
if args.internal_stats:
result.append('-analyzer-stats')
if args.analyze_headers:
result.append('-analyzer-opt-analyze-headers')
if args.stats:
result.append('-analyzer-checker=debug.Stats')
if args.maxloop:
result.extend(['-analyzer-max-loop', str(args.maxloop)])
if args.output_format:
result.append('-analyzer-output={0}'.format(args.output_format))
if args.analyzer_config:
result.extend(['-analyzer-config', args.analyzer_config])
if args.verbose >= 4:
result.append('-analyzer-display-progress')
if args.plugins:
result.extend(prefix_with('-load', args.plugins))
if args.enable_checker:
checkers = ','.join(args.enable_checker)
result.extend(['-analyzer-checker', checkers])
if args.disable_checker:
checkers = ','.join(args.disable_checker)
result.extend(['-analyzer-disable-checker', checkers])
return prefix_with('-Xclang', result)
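# Illustrative output (hypothetical args): with only maxloop=4 and
# output_format='html' set, the function would return
#   ['-Xclang', '-analyzer-max-loop', '-Xclang', '4',
#    '-Xclang', '-analyzer-output=html']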
def require(required):
""" Decorator for checking the required values in state.
It checks the required attributes in the passed state and stop when
any of those is missing. """
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
for key in required:
if key not in args[0]:
raise KeyError('{0} not passed to {1}'.format(
key, function.__name__))
return function(*args, **kwargs)
return wrapper
return decorator
@require(['command', # entry from compilation database
'directory', # entry from compilation database
'file', # entry from compilation database
'clang', # clang executable name (and path)
'direct_args', # arguments from command line
'force_debug', # kill non debug macros
'output_dir', # where generated report files shall go
'output_format', # it's 'plist', 'html', 'plist-html', 'plist-multi-file', or 'sarif'
'output_failures', # generate crash reports or not
'ctu']) # ctu control options
def run(opts):
""" Entry point to run (or not) static analyzer against a single entry
of the compilation database.
This complex task is decomposed into smaller methods which are calling
each other in chain. If the analysis is not possible the given method
just return and break the chain.
The passed parameter is a python dictionary. Each method first check
that the needed parameters received. (This is done by the 'require'
decorator. It's like an 'assert' to check the contract between the
caller and the called method.) """
try:
command = opts.pop('command')
command = command if isinstance(command, list) else decode(command)
logging.debug("Run analyzer against '%s'", command)
opts.update(classify_parameters(command))
return arch_check(opts)
except Exception:
logging.error("Problem occurred during analysis.", exc_info=1)
return None
@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
'error_output', 'exit_code'])
def report_failure(opts):
""" Create report when analyzer failed.
The major report is the preprocessor output. The output filename generated
randomly. The compiler output also captured into '.stderr.txt' file.
And some more execution context also saved into '.info.txt' file. """
def extension():
""" Generate preprocessor file extension. """
mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
return mapping.get(opts['language'], '.i')
def destination():
""" Creates failures directory if not exits yet. """
failures_dir = os.path.join(opts['output_dir'], 'failures')
if not os.path.isdir(failures_dir):
os.makedirs(failures_dir)
return failures_dir
    # Classify the error type: when Clang is terminated by a signal it's a
    # 'Crash'. (The Python subprocess Popen.returncode is negative when the
    # child is terminated by a signal.) Everything else is 'Other Error'.
error = 'crash' if opts['exit_code'] < 0 else 'other_error'
# Create preprocessor output file name. (This is blindly following the
# Perl implementation.)
(handle, name) = tempfile.mkstemp(suffix=extension(),
prefix='clang_' + error + '_',
dir=destination())
os.close(handle)
# Execute Clang again, but run the syntax check only.
cwd = opts['directory']
cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \
[opts['file'], '-o', name]
try:
cmd = get_arguments(cmd, cwd)
run_command(cmd, cwd=cwd)
except subprocess.CalledProcessError:
pass
except ClangErrorException:
pass
# write general information about the crash
    with open(name + '.info.txt', 'w') as handle:
        handle.write(opts['file'] + os.linesep)
        handle.write(error.title().replace('_', ' ') + os.linesep)
        handle.write(' '.join(cmd) + os.linesep)
        handle.write(' '.join(os.uname()) + os.linesep)
        handle.write(get_version(opts['clang']))
# write the captured output too
    with open(name + '.stderr.txt', 'w') as handle:
        handle.writelines(opts['error_output'])
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
'output_format'])
def run_analyzer(opts, continuation=report_failure):
""" It assembles the analysis command line and executes it. Capture the
output of the analysis and returns with it. If failure reports are
requested, it calls the continuation to generate it. """
def target():
""" Creates output file name for reports. """
if opts['output_format'] in {
'plist',
'plist-html',
'plist-multi-file'}:
(handle, name) = tempfile.mkstemp(prefix='report-',
suffix='.plist',
dir=opts['output_dir'])
os.close(handle)
return name
elif opts['output_format'] == 'sarif':
(handle, name) = tempfile.mkstemp(prefix='result-',
suffix='.sarif',
dir=opts['output_dir'])
os.close(handle)
return name
return opts['output_dir']
try:
cwd = opts['directory']
cmd = get_arguments([opts['clang'], '--analyze'] +
opts['direct_args'] + opts['flags'] +
[opts['file'], '-o', target()],
cwd)
output = run_command(cmd, cwd=cwd)
return {'error_output': output, 'exit_code': 0}
except subprocess.CalledProcessError as ex:
result = {'error_output': ex.output, 'exit_code': ex.returncode}
if opts.get('output_failures', False):
opts.update(result)
continuation(opts)
return result
except ClangErrorException as ex:
result = {'error_output': ex.error, 'exit_code': 0}
if opts.get('output_failures', False):
opts.update(result)
continuation(opts)
return result
def extdef_map_list_src_to_ast(extdef_src_list):
""" Turns textual external definition map list with source files into an
external definition map list with ast files. """
extdef_ast_list = []
for extdef_src_txt in extdef_src_list:
mangled_name, path = extdef_src_txt.split(" ", 1)
# Normalize path on windows as well
path = os.path.splitdrive(path)[1]
# Make relative path out of absolute
path = path[1:] if path[0] == os.sep else path
ast_path = os.path.join("ast", path + ".ast")
extdef_ast_list.append(mangled_name + " " + ast_path)
return extdef_ast_list
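# Illustrative transformation (hypothetical entry): the absolute source path
# is re-rooted under the 'ast' folder and given an '.ast' suffix:
#   '_Z3foov /home/user/a.cpp'  ->  '_Z3foov ast/home/user/a.cpp.ast'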
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
def ctu_collect_phase(opts):
""" Preprocess source by generating all data needed by CTU analysis. """
def generate_ast(triple_arch):
""" Generates ASTs for the current compilation command. """
args = opts['direct_args'] + opts['flags']
ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
os.path.realpath(opts['file'])[1:] +
'.ast')
ast_path = os.path.abspath(ast_joined_path)
ast_dir = os.path.dirname(ast_path)
if not os.path.isdir(ast_dir):
try:
os.makedirs(ast_dir)
except OSError:
                # In case another process already created it.
pass
ast_command = [opts['clang'], '-emit-ast']
ast_command.extend(args)
ast_command.append('-w')
ast_command.append(opts['file'])
ast_command.append('-o')
ast_command.append(ast_path)
logging.debug("Generating AST using '%s'", ast_command)
run_command(ast_command, cwd=opts['directory'])
def map_extdefs(triple_arch):
""" Generate external definition map file for the current source. """
args = opts['direct_args'] + opts['flags']
extdefmap_command = [opts['ctu'].extdef_map_cmd]
extdefmap_command.append(opts['file'])
extdefmap_command.append('--')
extdefmap_command.extend(args)
logging.debug("Generating external definition map using '%s'",
extdefmap_command)
extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])
extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)
extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
CTU_TEMP_DEFMAP_FOLDER)
if not os.path.isdir(extern_defs_map_folder):
try:
os.makedirs(extern_defs_map_folder)
except OSError:
                # In case another process already created it.
pass
if extdef_ast_list:
with tempfile.NamedTemporaryFile(mode='w',
dir=extern_defs_map_folder,
delete=False) as out_file:
out_file.write("\n".join(extdef_ast_list) + "\n")
cwd = opts['directory']
cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
+ [opts['file']]
triple_arch = get_triple_arch(cmd, cwd)
generate_ast(triple_arch)
map_extdefs(triple_arch)
@require(['ctu'])
def dispatch_ctu(opts, continuation=run_analyzer):
""" Execute only one phase of 2 phases of CTU if needed. """
ctu_config = opts['ctu']
if ctu_config.collect or ctu_config.analyze:
assert ctu_config.collect != ctu_config.analyze
if ctu_config.collect:
return ctu_collect_phase(opts)
if ctu_config.analyze:
cwd = opts['directory']
cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \
+ opts['flags'] + [opts['file']]
triarch = get_triple_arch(cmd, cwd)
ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),
'experimental-enable-naive-ctu-analysis=true']
analyzer_options = prefix_with('-analyzer-config', ctu_options)
direct_options = prefix_with('-Xanalyzer', analyzer_options)
opts['direct_args'].extend(direct_options)
return continuation(opts)
@require(['flags', 'force_debug'])
def filter_debug_flags(opts, continuation=dispatch_ctu):
""" Filter out nondebug macros when requested. """
if opts.pop('force_debug'):
# lazy implementation just append an undefine macro at the end
opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
return continuation(opts)
@require(['language', 'compiler', 'file', 'flags'])
def language_check(opts, continuation=filter_debug_flags):
""" Find out the language from command line parameters or file name
extension. The decision also influenced by the compiler invocation. """
accepted = frozenset({
'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
'c++-cpp-output', 'objective-c-cpp-output'
})
# language can be given as a parameter...
language = opts.pop('language')
compiler = opts.pop('compiler')
# ... or find out from source file extension
if language is None and compiler is not None:
language = classify_source(opts['file'], compiler == 'c')
if language is None:
logging.debug('skip analysis, language not known')
return None
elif language not in accepted:
logging.debug('skip analysis, language not supported')
return None
else:
logging.debug('analysis, language: %s', language)
opts.update({'language': language,
'flags': ['-x', language] + opts['flags']})
return continuation(opts)
@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
""" Do run analyzer through one of the given architectures. """
disabled = frozenset({'ppc', 'ppc64'})
received_list = opts.pop('arch_list')
if received_list:
# filter out disabled architectures and -arch switches
filtered_list = [a for a in received_list if a not in disabled]
if filtered_list:
            # There should be only one arch given (or the same one multiple
            # times). If multiple, different arches are given, they should
            # not change the pre-processing step. (That's the only pass we
            # have before running the analyzer.)
current = filtered_list.pop()
logging.debug('analysis, on arch: %s', current)
opts.update({'flags': ['-arch', current] + opts['flags']})
return continuation(opts)
else:
            logging.debug('skip analysis, no supported arch found')
return None
else:
logging.debug('analysis, on default arch')
return continuation(opts)
# To get good results from the static analyzer, certain compiler options
# shall be omitted. The compiler flag filtering only affects the static
# analyzer run.
#
# Keys are the option names, values are the number of following options to skip.
IGNORED_FLAGS = {
'-c': 0, # compile option will be overwritten
'-fsyntax-only': 0, # static analyzer option will be overwritten
'-o': 1, # will set up own output file
# flags below are inherited from the perl implementation.
'-g': 0,
'-save-temps': 0,
'-install_name': 1,
'-exported_symbols_list': 1,
'-current_version': 1,
'-compatibility_version': 1,
'-init': 1,
'-e': 1,
'-seg1addr': 1,
'-bundle_loader': 1,
'-multiply_defined': 1,
'-sectorder': 3,
'--param': 1,
'--serialize-diagnostics': 1
}
def classify_parameters(command):
""" Prepare compiler flags (filters some and add others) and take out
language (-x) and architecture (-arch) flags for future processing. """
result = {
'flags': [], # the filtered compiler flags
'arch_list': [], # list of architecture flags
'language': None, # compilation language, None, if not specified
'compiler': compiler_language(command) # 'c' or 'c++'
}
# iterate on the compile options
args = iter(command[1:])
for arg in args:
# take arch flags into a separate basket
if arg == '-arch':
result['arch_list'].append(next(args))
# take language
elif arg == '-x':
result['language'] = next(args)
        # Parameters that look like source files are not flags.
elif re.match(r'^[^-].+', arg) and classify_source(arg):
pass
# ignore some flags
elif arg in IGNORED_FLAGS:
count = IGNORED_FLAGS[arg]
for _ in range(count):
next(args)
        # We don't care about extra warning flags, but keep the warning
        # suppression ones (-Wno-*) so unwanted warnings stay hidden.
elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
pass
# and consider everything else as compilation flag.
else:
result['flags'].append(arg)
return result
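# Illustrative call (hypothetical command): classify_parameters(
#     ['clang', '-x', 'c++', '-arch', 'x86_64', '-O2', '-o', 'out.o',
#      '-c', 'main.cpp'])
# would put 'x86_64' into 'arch_list', 'c++' into 'language' and only
# ['-O2'] into 'flags', since '-o out.o', '-c' and the source file name
# are all filtered out.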
| 39.175525
| 96
| 0.63686
| 3,956
| 31,693
| 4.954247
| 0.167088
| 0.012858
| 0.005102
| 0.008164
| 0.176182
| 0.123782
| 0.094546
| 0.079239
| 0.06434
| 0.056891
| 0
| 0.00286
| 0.260815
| 31,693
| 808
| 97
| 39.22401
| 0.833739
| 0.276055
| 0
| 0.168605
| 0
| 0
| 0.137175
| 0.026874
| 0
| 0
| 0
| 0
| 0.001938
| 1
| 0.069767
| false
| 0.013566
| 0.040698
| 0
| 0.182171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbe1408e84afa0a04966d9f60dcf8a3847bfc25f
| 1,460
|
py
|
Python
|
tableborder.py
|
PIRXrav/pyhack
|
af5c86fb721053d8a3e819ab772c8144a23b86bf
|
[
"MIT"
] | null | null | null |
tableborder.py
|
PIRXrav/pyhack
|
af5c86fb721053d8a3e819ab772c8144a23b86bf
|
[
"MIT"
] | null | null | null |
tableborder.py
|
PIRXrav/pyhack
|
af5c86fb721053d8a3e819ab772c8144a23b86bf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# pylint: disable=C0103
# pylint: disable=R0902
# pylint: disable=R0903
# pylint: disable=R0913
"""
Defines the TableBorder class.
"""
class TableBorder:
"""
    Simplifies the use of UNICODE box-drawing characters.
"""
def __init__(self,
top_left, top_split, top_right,
mid_left, mid_split, mid_right,
low_left, low_split, low_right,
horizontal, vertical):
"""
        Constructor.
"""
self.top_left = top_left
self.top_split = top_split
self.top_right = top_right
self.mid_left = mid_left
self.mid_split = mid_split
self.mid_right = mid_right
self.low_left = low_left
self.low_split = low_split
self.low_right = low_right
self.horizontal = horizontal
self.vertical = vertical
BORDERS = [TableBorder('+', '+', '+',\
'+', '+', '+',\
'+', '+', '+',\
'-', '|'),
TableBorder(u'\u250c', u'\u252C', u'\u2510',\
u'\u251C', u'\u253C', u'\u2524',\
u'\u2514', u'\u2534', u'\u2518',\
u'\u2500', u'\u2502'),
TableBorder(u'\u2554', u'\u2566', u'\u2557',\
u'\u2560', u'\u256C', u'\u2563',\
u'\u255a', u'\u2569', u'\u255d',\
u'\u2550', u'\u2551')
]
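# Illustrative usage (not part of the original module): draw the top edge
# of a two-column table with the light box-drawing border set.
#   b = BORDERS[1]
#   print(b.top_left + b.horizontal * 3 + b.top_split
#         + b.horizontal * 3 + b.top_right)
# prints: ┌───┬───┐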
| 30.416667
| 56
| 0.469863
| 151
| 1,460
| 4.337748
| 0.370861
| 0.079389
| 0.033588
| 0.042748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107811
| 0.377397
| 1,460
| 47
| 57
| 31.06383
| 0.612761
| 0.125342
| 0
| 0.066667
| 0
| 0
| 0.116544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbe1aa3b9d736d93221a08965b3b705efeef3804
| 216
|
py
|
Python
|
app/urls.py
|
tkf2019/Vue-Django-SAST-Search
|
385af9819c608ce2d0845ed3e786777ff52b52b3
|
[
"MIT"
] | null | null | null |
app/urls.py
|
tkf2019/Vue-Django-SAST-Search
|
385af9819c608ce2d0845ed3e786777ff52b52b3
|
[
"MIT"
] | null | null | null |
app/urls.py
|
tkf2019/Vue-Django-SAST-Search
|
385af9819c608ce2d0845ed3e786777ff52b52b3
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^register/', views.register),
url(r'^login/', views.login),
    url(r'^logout/', views.logout),
    url(r'^search/', views.search)
]
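# Illustrative sketch (hypothetical project urls.py, not part of this app):
# these patterns are typically mounted from the project's ROOT_URLCONF, e.g.
#   from django.conf.urls import include, url
#   urlpatterns = [url(r'^api/', include('app.urls'))]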
| 19.636364
| 39
| 0.643519
| 30
| 216
| 4.633333
| 0.433333
| 0.115108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175926
| 216
| 10
| 40
| 21.6
| 0.780899
| 0
| 0
| 0
| 0
| 0
| 0.143519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbe1d984552acfc78008a25befd61632a445f85d
| 28,508
|
py
|
Python
|
custom_components/hasl/sensor.py
|
Ziqqo/hasl-platform
|
27386314bf58626538d59c38d89249b07ed9256a
|
[
"Apache-2.0"
] | null | null | null |
custom_components/hasl/sensor.py
|
Ziqqo/hasl-platform
|
27386314bf58626538d59c38d89249b07ed9256a
|
[
"Apache-2.0"
] | null | null | null |
custom_components/hasl/sensor.py
|
Ziqqo/hasl-platform
|
27386314bf58626538d59c38d89249b07ed9256a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Simple service for SL (Storstockholms Lokaltrafik)."""
import datetime
import json
import logging
from datetime import timedelta
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL,
CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF,
STATE_ON)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (async_track_point_in_utc_time,
async_track_utc_time_change,
track_time_interval)
from homeassistant.util import Throttle
from homeassistant.util.dt import now
from hasl import (haslapi, fpapi, tl2api, ri4api, si2api,
HASL_Error, HASL_API_Error, HASL_HTTP_Error)
__version__ = '2.2.0'
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'hasl'
# Keys used in the configuration.
CONF_RI4_KEY = 'ri4key'
CONF_SI2_KEY = 'si2key'
CONF_TL2_KEY = 'tl2key'
CONF_SITEID = 'siteid'
CONF_LINES = 'lines'
CONF_DIRECTION = 'direction'
CONF_ENABLED_SENSOR = 'sensor'
CONF_TIMEWINDOW = 'timewindow'
CONF_SENSORPROPERTY = 'property'
CONF_TRAIN_TYPE = 'train_type'
CONF_TRAFFIC_CLASS = 'traffic_class'
CONF_VERSION = 'version_sensor'
CONF_USE_MINIMIZATION = 'api_minimization'
LIST_SENSOR_TYPES = ['departures', 'status', 'trainlocation', 'comb', 'tl2']
LIST_SENSOR_PROPERTIES = ['min', 'time', 'deviations', 'refresh', 'updated']
LIST_TRAIN_TYPES = ['PT', 'RB', 'TVB', 'SB', 'LB', 'SpvC', 'TB1', 'TB2', 'TB3']
# Default values for configuration.
DEFAULT_INTERVAL = timedelta(minutes=10)
DEFAULT_TIMEWINDOW = 30
DEFAULT_DIRECTION = 0
DEFAULT_SENSORPROPERTY = 'min'
DEFAULT_TRAIN_TYPE = 'PT'
DEFAULT_TRAFFIC_CLASS = ['metro', 'train', 'local', 'tram', 'bus', 'fer']
DEFAULT_SENSORTYPE = 'departures'
DEFAULT_CACHE_FILE = '.storage/haslcache.json'
# Defining the configuration schema.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
# API Keys
vol.Optional(CONF_RI4_KEY): cv.string,
vol.Optional(CONF_SI2_KEY): cv.string,
vol.Optional(CONF_TL2_KEY): cv.string,
vol.Optional(CONF_VERSION, default=False): cv.boolean,
vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean,
vol.Required(CONF_SENSORS, default=[]):
vol.All(cv.ensure_list, [vol.All({
vol.Required(ATTR_FRIENDLY_NAME): cv.string,
vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE):
vol.In(LIST_SENSOR_TYPES),
vol.Optional(CONF_ENABLED_SENSOR): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL):
vol.Any(cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_SITEID): cv.string,
vol.Optional(CONF_LINES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION):
vol.All(vol.Coerce(int), vol.Range(min=0, max=2)),
vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW):
vol.All(vol.Coerce(int), vol.Range(min=0, max=60)),
vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY):
vol.In(LIST_SENSOR_PROPERTIES),
vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS):
vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]),
vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE):
vol.In(LIST_TRAIN_TYPES)
})]),
}, extra=vol.ALLOW_EXTRA)
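# Illustrative configuration.yaml entry matching the schema above (keys and
# values are examples only, not taken from a real installation):
#
#   sensor:
#     - platform: hasl
#       ri4key: YOUR_RI4_KEY
#       sensors:
#         - friendly_name: Home departures
#           sensor_type: departures
#           siteid: '9192'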
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the sensors."""
if not hass.data.get(DOMAIN):
hass.data[DOMAIN] = {}
sensors = []
if config[CONF_VERSION]:
sensors.append(SLVersionSensor(hass))
_LOGGER.info("Created version sensor for HASL")
for sensorconf in config[CONF_SENSORS]:
if sensorconf[CONF_SENSOR_TYPE] == 'departures' or \
sensorconf[CONF_SENSOR_TYPE] == 'comb':
sitekey = sensorconf.get(CONF_SITEID)
si2key = config.get(CONF_SI2_KEY)
ri4key = config.get(CONF_RI4_KEY)
if sitekey and ri4key:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLDeparturesSensor(
hass,
si2key,
ri4key,
sitekey,
sensorconf.get(CONF_LINES),
sensorname,
sensorconf.get(CONF_ENABLED_SENSOR),
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_DIRECTION),
sensorconf.get(CONF_TIMEWINDOW),
sensorconf.get(CONF_SENSORPROPERTY),
config.get(CONF_USE_MINIMIZATION)
))
_LOGGER.info("Created departures sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing site, si2key or ri4key",
sensorconf[ATTR_FRIENDLY_NAME])
if sensorconf[CONF_SENSOR_TYPE] == 'status' or \
sensorconf[CONF_SENSOR_TYPE] == 'tl2':
tl2key = config.get(CONF_TL2_KEY)
if tl2key:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLStatusSensor(
hass,
tl2key,
sensorname,
sensorconf.get(CONF_ENABLED_SENSOR),
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_TRAFFIC_CLASS),
config.get(CONF_USE_MINIMIZATION)
))
_LOGGER.info("Created status sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing tl2key attribute",
sensorconf[ATTR_FRIENDLY_NAME])
if sensorconf[CONF_SENSOR_TYPE] == 'trainlocation':
train_type = sensorconf.get(CONF_TRAIN_TYPE)
if train_type:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLTrainLocationSensor(
hass,
sensorname,
train_type,
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_ENABLED_SENSOR),
))
_LOGGER.info("Created train sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing train_type attribute",
sensorconf[ATTR_FRIENDLY_NAME])
add_devices(sensors)
class SLTrainLocationSensor(Entity):
"""Trafic Situation Sensor."""
def __init__(self, hass, friendly_name, train_type,
interval, enabled_sensor):
self._hass = hass
self._fpapi = fpapi()
self._name = friendly_name
self._interval = interval
self._enabled_sensor = enabled_sensor
self._train_type = train_type
self._data = {}
self.update = Throttle(interval)(self._update)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
return None
@property
def device_state_attributes(self):
""" Return the sensor attributes."""
return {'type': self._train_type, 'data': json.dumps(self._data)}
@property
def state(self):
""" Return the state of the sensor."""
return self._train_type
def _update(self):
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
        if self._enabled_sensor is None or sensor_state.state == STATE_ON:
try:
apidata = self._fpapi.request(self._train_type)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating train location sensor: %s", e.details)
return
except Exception as e:
_LOGGER.error("A error occured while"
"updating train location sensor: %s", e)
return
self._data = apidata
_LOGGER.info("Update completed %s...", self._name)
class SLVersionSensor(Entity):
"""HASL Version Sensor."""
def __init__(self, hass):
self._hass = hass
self._haslapi = haslapi()
self._name = 'HASL Version'
self._version = __version__
self._py_version = self._haslapi.version()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
return None
@property
def device_state_attributes(self):
""" Return the sensor attributes."""
return {'hasl': self._version, 'pyHasl': self._py_version}
@property
def state(self):
""" Return the state of the sensor."""
return self._version + "/" + self._py_version
class SLStatusSensor(Entity):
"""Trafic Situation Sensor."""
def __init__(self, hass, tl2key, friendly_name,
enabled_sensor, interval, type,
minimization):
self._tl2api = tl2api(tl2key)
self._datakey = 'tl2_' + tl2key
self._interval = interval
self._hass = hass
self._name = friendly_name
self._enabled_sensor = enabled_sensor
self._type = type
self._sensordata = []
self._lastupdate = '-'
self._cachefile = hass.config.path(DEFAULT_CACHE_FILE)
self._minimization = minimization
if not hass.data[DOMAIN].get(self._datakey):
hass.data[DOMAIN][self._datakey] = ''
self.update = Throttle(interval)(self._update)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
return 'mdi:train-car'
@property
def device_state_attributes(self):
""" Return the sensor attributes."""
return self._sensordata
@property
def state(self):
""" Return the state of the sensor."""
return self._lastupdate
    def getCache(self, key):
        try:
            with open(self._cachefile, 'r') as jsonFile:
                data = json.load(jsonFile)
            return data.get(key)
        except (OSError, ValueError):
            return {}
    def putCache(self, key, value):
        try:
            with open(self._cachefile, 'r') as jsonFile:
                data = json.load(jsonFile)
            data[key] = value
        except (OSError, ValueError):
            data = {key: value}
        with open(self._cachefile, 'w') as jsonFile:
            jsonFile.write(json.dumps(data))
def _update(self):
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
        if self._enabled_sensor is None or sensor_state.state == STATE_ON:
_LOGGER.info("Starting to update TL2 for %s...",
self._name)
            # Dictionary used to assemble the new sensor attribute data.
newdata = {}
# Use some nice translations for the statuses etc.
statuses = {
'EventGood': 'Good',
'EventMinor': 'Minor',
'EventMajor': 'Closed',
'EventPlanned': 'Planned',
}
# Icon table used for HomeAssistant.
statusIcons = {
'EventGood': 'mdi:check',
'EventMinor': 'mdi:clock-alert-outline',
'EventMajor': 'mdi:close',
'EventPlanned': 'mdi:triangle-outline'
}
trafficTypeIcons = {
'ferry': 'mdi:ferry',
'bus': 'mdi:bus',
'tram': 'mdi:tram',
'train': 'mdi:train',
'local': 'mdi:train-variant',
'metro': 'mdi:subway-variant'
}
            # If the same API has already made the request within the
            # specified interval, use that data instead of requesting it
            # again, and spare some innocent API credits from dying.
cacheage = self._hass.data[DOMAIN][self._datakey]
if not cacheage or now(self._hass.config.time_zone) \
- self._interval > cacheage or not self._minimization:
try:
apidata = self._tl2api.request()
apidata = apidata['ResponseData']['TrafficTypes']
self.putCache(self._datakey, apidata)
self._hass.data[DOMAIN][self._datakey] = \
now(self._hass.config.time_zone)
_LOGGER.info("Updated cache for %s...", self._name)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating TL2 sensor: %s", e.details)
return
except Exception as e:
_LOGGER.error("A error occured while "
"updating TL4 API: %s", e)
return
else:
apidata = self.getCache(self._datakey)
_LOGGER.info("Reusing data from cache for %s...",
self._name)
# Return only the relevant portion of the results.
for response in apidata:
type = response['Type']
if self._type is None or type in self._type:
statustype = ('ferry' if type == 'fer' else type)
newdata[statustype + '_status'] = \
statuses.get(response['StatusIcon'])
newdata[statustype + '_status_icon'] = \
statusIcons.get(response['StatusIcon'])
newdata[statustype + '_icon'] = \
trafficTypeIcons.get(statustype)
for event in response['Events']:
event['Status'] = statuses.get(event['StatusIcon'])
event['StatusIcon'] = \
statusIcons.get(event['StatusIcon'])
newdata[statustype + '_events'] = response['Events']
# Attribution and update sensor data.
newdata['attribution'] = "Stockholms Lokaltrafik"
newdata['last_updated'] = \
self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' +
'%H:%M:%S')
self._sensordata = newdata
self._lastupdate = newdata['last_updated']
_LOGGER.info("TL2 update completed for %s...", self._name)
class SLDeparturesSensor(Entity):
"""Departure board for one SL site."""
def __init__(self, hass, si2key, ri4key, siteid,
lines, friendly_name, enabled_sensor,
interval, direction, timewindow, sensorproperty,
minimization):
"""Initialize"""
# The table of resulttypes and the corresponding units of measure.
unit_table = {
'min': 'min',
'time': '',
'deviations': '',
'refresh': '',
            'updated': '',
}
if si2key:
self._si2key = si2key
self._si2api = si2api(si2key, siteid, '')
self._si2datakey = 'si2_' + si2key + '_' + siteid
self._ri4key = ri4key
self._ri4api = ri4api(ri4key, siteid, 60)
self._ri4datakey = 'ri2_' + ri4key + '_' + siteid
self._hass = hass
self._name = friendly_name
self._lines = lines
self._siteid = siteid
self._enabled_sensor = enabled_sensor
self._sensorproperty = sensorproperty
self._departure_table = []
self._deviations_table = []
self._direction = direction
self._timewindow = timewindow
self._nextdeparture_minutes = '0'
self._nextdeparture_expected = '-'
self._lastupdate = '-'
self._interval = interval
self._unit_of_measure = unit_table.get(self._sensorproperty, 'min')
self._cachefile = hass.config.path(DEFAULT_CACHE_FILE)
self._minimization = minimization
if not hass.data[DOMAIN].get(self._ri4datakey):
hass.data[DOMAIN][self._ri4datakey] = ''
if self._si2key:
if not hass.data[DOMAIN].get(self._si2datakey):
hass.data[DOMAIN][self._si2datakey] = ''
# Setup updating of the sensor.
self.update = Throttle(interval)(self._update)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
if self._deviations_table:
return 'mdi:bus-alert'
return 'mdi:bus'
    @property
    def state(self):
        """ Return the state based on the configured sensor property. """
        # If the sensor should return minutes to the next departure.
        if self._sensorproperty == 'min':
            if not self._departure_table:
                return '-'
            return self._departure_table[0]['time']
        # If the sensor should return the time at which the next departure
        # occurs.
        if self._sensorproperty == 'time':
            if not self._departure_table:
                return '-'
            expected = self._departure_table[0]['expected'] or '-'
            if expected != '-':
                expected = \
                    datetime.datetime.strptime(expected,
                                               '%Y-%m-%dT%H:%M:%S')
                expected = expected.strftime('%H:%M:%S')
            return expected
        # If the sensor should return the number of deviations.
        if self._sensorproperty == 'deviations':
            return len(self._deviations_table)
        # If the sensor should return whether it is updating or not.
        if self._sensorproperty == 'refresh':
            if self._enabled_sensor is None:
                return STATE_ON
            sensor_state = self._hass.states.get(self._enabled_sensor)
            if sensor_state is not None and sensor_state.state == STATE_ON:
                return STATE_ON
            return STATE_OFF
        # If the sensor should return when it was last updated.
        if self._sensorproperty == 'updated':
            if self._lastupdate == '-':
                return '-'
            return self._lastupdate.strftime('%Y-%m-%d %H:%M:%S')
        # Failsafe
        return '-'
@property
def device_state_attributes(self):
""" Return the sensor attributes ."""
# Initialize the state attributes.
val = {}
        # Format the next expected time.
if self._departure_table:
expected_time = self._departure_table[0]['expected'] or '-'
expected_minutes = self._departure_table[0]['time'] or '-'
            if expected_time != '-':
expected_time = \
datetime.datetime.strptime(expected_time,
'%Y-%m-%dT%H:%M:%S')
expected_time = expected_time.strftime('%H:%M:%S')
else:
expected_time = '-'
expected_minutes = '-'
# Format the last refresh time.
refresh = self._lastupdate
        if self._lastupdate != '-':
refresh = refresh.strftime('%Y-%m-%d %H:%M:%S')
# Setup the unit of measure.
        if self._unit_of_measure != '':
val['unit_of_measurement'] = self._unit_of_measure
# Check if sensor is currently updating or not.
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
        if self._enabled_sensor is None or sensor_state.state == STATE_ON:
val['refresh_enabled'] = STATE_ON
else:
val['refresh_enabled'] = STATE_OFF
# Set values of the sensor.
val['attribution'] = 'Stockholms Lokaltrafik'
val['departures'] = self._departure_table
val['deviations'] = self._deviations_table
val['last_refresh'] = refresh
val['next_departure_minutes'] = expected_minutes
val['next_departure_time'] = expected_time
val['deviation_count'] = len(self._deviations_table)
return val
def parseDepartureTime(self, t):
""" weird time formats from the API,
do some quick and dirty conversions. """
try:
if t == 'Nu':
return 0
s = t.split()
if len(s) > 1 and s[1] == 'min':
return int(s[0])
s = t.split(':')
if len(s) > 1:
rightnow = now(self._hass.config.time_zone)
min = int(s[0]) * 60 + int(s[1]) - (rightnow.hour * 60 +
rightnow.minute)
if min < 0:
min = min + 1440
return min
except Exception:
_LOGGER.warning("Failed to parse departure time (%s) ", t)
return 0
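    # Illustrative conversions (hypothetical inputs, matching the formats
    # handled above):
    #   parseDepartureTime('Nu')     -> 0 (departing now)
    #   parseDepartureTime('5 min')  -> 5
    #   parseDepartureTime('14:30')  -> minutes from now until 14:30,
    #                                   wrapping past midnight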
    def getCache(self, key):
        try:
            with open(self._cachefile, 'r') as jsonFile:
                data = json.load(jsonFile)
            return data.get(key)
        except (OSError, ValueError):
            return {}
    def putCache(self, key, value):
        try:
            with open(self._cachefile, 'r') as jsonFile:
                data = json.load(jsonFile)
            data[key] = value
        except (OSError, ValueError):
            data = {key: value}
        with open(self._cachefile, 'w') as jsonFile:
            jsonFile.write(json.dumps(data))
def _update(self):
"""Get the departure board."""
# If using external sensor, get its value.
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
        # If we don't have an external sensor, or it is ON, then proceed.
        if self._enabled_sensor is None or sensor_state.state \
                == STATE_ON:
self._update_ri4()
if self._si2key:
self._update_si2()
self._lastupdate = now(self._hass.config.time_zone)
def _update_ri4(self):
errorOccured = False
_LOGGER.info("Starting to update RI4 for %s...", self._name)
cacheage = self._hass.data[DOMAIN][self._ri4datakey]
if not cacheage or now(self._hass.config.time_zone) \
- self._interval > cacheage or not self._minimization:
try:
departuredata = self._ri4api.request()
departuredata = departuredata['ResponseData']
self.putCache(self._ri4datakey, departuredata)
self._hass.data[DOMAIN][self._ri4datakey] = \
now(self._hass.config.time_zone)
_LOGGER.info("Updated cache for %s...", self._name)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating SI2 sensor: %s", e.details)
errorOccured = True
except Exception as e:
_LOGGER.error("A communication error occured while "
"updating RI4 API: %s", e)
errorOccured = True
else:
try:
departuredata = self.getCache(self._ri4datakey)
_LOGGER.info("Reusing data from cache for %s...",
self._name)
except Exception as e:
_LOGGER.error("A error occured while retreiving "
"cached RI4 sensor data: %s", e)
errorOccured = True
if not errorOccured:
departures = []
iconswitcher = {
'Buses': 'mdi:bus',
'Trams': 'mdi:tram',
'Ships': 'mdi:ferry',
'Metros': 'mdi:subway-variant',
'Trains': 'mdi:train',
}
for (i, traffictype) in enumerate(['Metros', 'Buses', 'Trains',
'Trams', 'Ships']):
for (idx, value) in enumerate(departuredata[traffictype]):
direction = value['JourneyDirection'] or 0
displaytime = value['DisplayTime'] or ''
destination = value['Destination'] or ''
linenumber = value['LineNumber'] or ''
expected = value['ExpectedDateTime'] or ''
groupofline = value['GroupOfLine'] or ''
icon = iconswitcher.get(traffictype, 'mdi:train-car')
if int(self._direction) == 0 or int(direction) \
== int(self._direction):
if self._lines == [] or linenumber \
in self._lines:
diff = self.parseDepartureTime(displaytime)
if diff < self._timewindow:
departures.append({
'line': linenumber,
'direction': direction,
'departure': displaytime,
'destination': destination,
'time': diff,
'expected': expected,
'type': traffictype,
'groupofline': groupofline,
'icon': icon,
})
self._departure_table = sorted(departures,
key=lambda k: k['time'])
_LOGGER.info("RI4 update completed for %s...", self._name)
def _update_si2(self):
errorOccured = False
_LOGGER.info("Starting to update SI2 for %s...", self._name)
cacheage = self._hass.data[DOMAIN][self._si2datakey]
if not cacheage or now(self._hass.config.time_zone) \
- self._interval > cacheage or not self._minimization:
try:
deviationdata = self._si2api.request()
deviationdata = deviationdata['ResponseData']
self.putCache(self._si2datakey, deviationdata)
self._hass.data[DOMAIN][self._si2datakey] = \
now(self._hass.config.time_zone)
_LOGGER.info('Updated cache for %s...', self._name)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating SI2 sensor: %s", e.details)
errorOccured = True
except Exception as e:
_LOGGER.error("A error occured while "
"updating SI2 sensor: %s", e)
errorOccured = True
else:
try:
deviationdata = self.getCache(self._si2datakey)
_LOGGER.info("Reusing data from cache for %s...",
self._name)
except Exception as e:
_LOGGER.error("A error occured while retreiving "
"cached SI2 sensor: %s", e.details)
errorOccured = True
if not errorOccured:
deviations = []
for (idx, value) in enumerate(deviationdata):
deviations.append({
'updated': value['Updated'],
'title': value['Header'],
'fromDate': value['FromDateTime'],
'toDate': value['UpToDateTime'],
'details': value['Details'],
'sortOrder': value['SortOrder'],
})
self._deviations_table = \
sorted(deviations, key=lambda k: k['sortOrder'])
_LOGGER.info("SI2 update completed for %s...", self._name)
| 35.369727
| 79
| 0.542058
| 2,842
| 28,508
| 5.234694
| 0.133357
| 0.023593
| 0.018283
| 0.009679
| 0.437655
| 0.387309
| 0.333132
| 0.31626
| 0.284735
| 0.275459
| 0
| 0.007154
| 0.357654
| 28,508
| 805
| 80
| 35.413665
| 0.805264
| 0.068753
| 0
| 0.37585
| 0
| 0
| 0.107269
| 0.002578
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052721
| false
| 0
| 0.022109
| 0
| 0.146259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbe25f137db8fdda41fdc3006d42e7f6d84f1a1d
| 2,067
|
py
|
Python
|
simbad_tools.py
|
ishivvers/astro
|
ff3f3b9f8ef4013157c277bbb5bf82ac1bd3287d
|
[
"MIT"
] | 1
|
2015-12-06T00:19:35.000Z
|
2015-12-06T00:19:35.000Z
|
simbad_tools.py
|
ishivvers/astro
|
ff3f3b9f8ef4013157c277bbb5bf82ac1bd3287d
|
[
"MIT"
] | null | null | null |
simbad_tools.py
|
ishivvers/astro
|
ff3f3b9f8ef4013157c277bbb5bf82ac1bd3287d
|
[
"MIT"
] | null | null | null |
"""
A quick library to deal with searching simbad for info
about a SN and parsing the results.
Author: Isaac Shivvers, ishivvers@berkeley.edu, 2014
example SIMBAD uri query:
http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S
"""
import re
try:
    from urllib2 import urlopen  # Python 2
except ImportError:
    from urllib.request import urlopen  # Python 3
def get_SN_info( name ):
"""
Queries simbad for SN coords, redshift, and host galaxy.
If redshift is not given for SN, attempts to resolve link to
host galaxy and report its redshift.
Returns ( (ra,dec), redshift, host_name, redshift_citation ), with
values of None inserted whenever it cannot resolve the value.
"""
simbad_uri = "http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s"
regex_coords = "Coordinates\(FK5.+\): .+"
regex_redshift = "Redshift:\s+\d+\.\d+.+"
regex_host = "apparent\s+host\s+galaxy\s+.+?\{(.*?)\}"
result = urlopen( simbad_uri % name.replace(' ','%20') ).read()
rescoords = re.search( regex_coords, result )
resred = re.search( regex_redshift, result )
reshost = re.search( regex_host, result )
try:
cs = rescoords.group().split(':')[1].strip()
ra = cs[:12].strip()
dec = cs[12:].strip()
    except AttributeError:
ra,dec = None,None
try:
redshift = float(resred.group().strip('Redshift: ').split(' ')[0])
citation = resred.group().split(' ')[-1]
except AttributeError:
redshift = None
citation = None
try:
host = reshost.group().split('{')[1].split('}')[0]
except AttributeError:
host = None
    if (redshift is None) and (host is not None):
# get the redshift from the host galaxy
result = urlopen( simbad_uri % host.replace(' ','%20') ).read()
resred = re.search( regex_redshift, result )
try:
redshift = float(resred.group().strip('Redshift: ').split(' ')[0])
citation = resred.group().split(' ')[-1]
except AttributeError:
pass
return ((ra,dec), redshift, host, citation)
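# Illustrative usage (hypothetical; the return values depend on the live
# SIMBAD service):
#   (coords, redshift, host, citation) = get_SN_info('sn 1998S')
# Any field that cannot be resolved from the SIMBAD reply comes back as None.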
| 32.296875
| 88
| 0.610063
| 260
| 2,067
| 4.796154
| 0.376923
| 0.028869
| 0.0417
| 0.028869
| 0.283881
| 0.283881
| 0.230954
| 0.230954
| 0.230954
| 0.230954
| 0
| 0.017165
| 0.238994
| 2,067
| 63
| 89
| 32.809524
| 0.775588
| 0.279149
| 0
| 0.361111
| 0
| 0.027778
| 0.130345
| 0.056552
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0.027778
| 0.055556
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbe3699b610c3c766074b1340770c91698f9123b
| 15,647
|
py
|
Python
|
robots/environments.py
|
StanfordASL/soft-robot-control
|
29ade9b7b952e25e639b42767a4f09c87a0e824a
|
[
"MIT"
] | 5
|
2021-03-07T11:42:11.000Z
|
2022-02-28T09:46:05.000Z
|
robots/environments.py
|
StanfordASL/soft-robot-control
|
29ade9b7b952e25e639b42767a4f09c87a0e824a
|
[
"MIT"
] | null | null | null |
robots/environments.py
|
StanfordASL/soft-robot-control
|
29ade9b7b952e25e639b42767a4f09c87a0e824a
|
[
"MIT"
] | 3
|
2021-01-23T11:09:40.000Z
|
2022-03-02T11:54:57.000Z
|
import os
from math import cos
from math import sin
import Sofa.Core
from splib.numerics import Quat, Vec3
from sofacontrol import measurement_models
path = os.path.dirname(os.path.abspath(__file__))
class TemplateEnvironment:
def __init__(self, name='Template', rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01):
self.name = name
self.robot = Sofa.Core.Node(name)
# set-up solvers
self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder="0", rayleighMass=str(rayleighMass),
rayleighStiffness=str(rayleighStiffness))
self.robot.addObject('SparseLDLSolver', name='preconditioner')
self.robot.addObject('GenericConstraintCorrection', solverName="preconditioner")
self.actuator_list = []
self.nb_nodes = None
self.gravity = [0., -9810., 0.] # default
self.dt = dt
def get_measurement_model(self, nodes=None, pos=True, vel=True):
if nodes is None:
return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos, vel=vel)
else:
return measurement_models.linearModel(nodes, self.nb_nodes, pos=pos, vel=vel)
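    # Illustrative usage (hypothetical): build a position-only measurement
    # model over three chosen nodes of an environment.
    #   env = Diamond()
    #   model = env.get_measurement_model(nodes=[0, 10, 20],
    #                                     pos=True, vel=False)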
class Trunk(TemplateEnvironment):
def __init__(self, name='Trunk', all_cables=True):
super(Trunk, self).__init__(name=name)
self.nb_nodes = 709
self.gravity = [0., 0., 9810.]
self.robot.min_force = [0.] * 8 # Without premultiplication with dt
self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/trunk.vtk')
self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')
self.robot.addObject('TetrahedronSetTopologyModifier')
self.robot.addObject('TetrahedronSetTopologyAlgorithms')
self.robot.addObject('TetrahedronSetGeometryAlgorithms')
# Option 1:
self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false',
showIndicesScale='4e-5')
# Option 2: Equivalent to option 1 (we believe)
# self.robot.addObject('MechanicalObject', src='@loader')
# Gives a mass to the model
self.robot.addObject('UniformMass', totalMass=0.042)
        # Add a TetrahedronFEMForceField component, which implements an
        # elastic material model solved using the Finite Element Method on
        # tetrahedrons.
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large',
poissonRatio=0.45,
youngModulus=450)
# Fix the base of the trunk by adding constraints in a region of interest (ROI)
self.robot.addObject('BoxROI', name='boxROI', box=[[-20, -20, 0], [20, 20, 20]], drawBoxes=False)
self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')
##########################################
# Cable #
##########################################
actuator_names = ''
length1 = 10.
length2 = 2.
lengthTrunk = 195.
pullPoint = [[0., length1, 0.], [-length1, 0., 0.], [0., -length1, 0.], [length1, 0., 0.]]
direction = Vec3(0., length2 - length1, lengthTrunk)
direction.normalize()
nbCables = 4
actuators = self.robot.addChild('actuators')
for i in range(0, nbCables):
childname = 'cableL' + str(i)
theta = 1.57 * i
q = Quat(0., 0., sin(theta / 2.), cos(theta / 2.))
position = [[0., 0., 0.]] * 20
for k in range(0, 20, 2):
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 21)
position[k] = v.rotateFromQuat(q)
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 27)
position[k + 1] = v.rotateFromQuat(q)
cableL = actuators.addChild(childname)
cableL.addObject('MechanicalObject', name='meca',
position=pullPoint[i] + [pos.toList() for pos in position])
cableL.addObject('CableConstraint', template='Vec3d', name="cable",
hasPullPoint="0",
indices=list(range(21)),
maxPositiveDisp='70',
maxDispVariation="1",
valueType='force',
minForce=self.robot.min_force[i] * self.robot.dt.value)
cableL.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')
actuator_names += childname + '/cable,'
self.actuator_list.append(cableL.cable)
if all_cables:
for i in range(0, nbCables):
childname = 'cableS' + str(i)
theta = 1.57 * i
q = Quat(0., 0., sin(theta / 2.), cos(theta / 2.))
position = [[0., 0., 0.]] * 10
for k in range(0, 9, 2):
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 21)
position[k] = v.rotateFromQuat(q)
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 27)
position[k + 1] = v.rotateFromQuat(q)
cableS = actuators.addChild(childname)
cableS.addObject('MechanicalObject', name='meca',
position=pullPoint[i] + [pos.toList() for pos in position])
cableS.addObject('CableConstraint', template='Vec3d', name="cable",
hasPullPoint="0",
indices=list(range(10)),
maxPositiveDisp='40',
maxDispVariation="1",
valueType='force',
minForce=self.robot.min_force[i + 4] * self.robot.dt.value)
cableS.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')
actuator_names += childname + '/cable,'
self.actuator_list.append(cableS.cable)
self.robot.actuator_list = self.actuator_list
##########################################
# Visualization #
##########################################
trunkVisu = self.robot.addChild('VisualModel')
trunkVisu.addObject('MeshSTLLoader', filename=path + "/mesh/trunk.stl")
trunkVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8])
trunkVisu.addObject('BarycentricMapping')
class Trunk4Cables(Trunk):
def __init__(self, name='Trunk4Cables'):
super(Trunk4Cables, self).__init__(name=name, all_cables=False)
self.robot.min_force = [0, 0, 0, 0] # Without premultiplication with dt
class Finger(TemplateEnvironment):
def __init__(self, name='Finger'):
super(Finger, self).__init__(name=name)
self.nb_nodes = 158
self.robot.min_force = [0.] # Without premultiplication with dt
self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/finger.vtk')
self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')
self.robot.addObject('TetrahedronSetTopologyModifier')
self.robot.addObject('TetrahedronSetTopologyAlgorithms')
self.robot.addObject('TetrahedronSetGeometryAlgorithms')
self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false',
showIndicesScale='4e-5')
self.robot.addObject('UniformMass', totalMass=0.075)
        # Add a TetrahedronFEMForceField component, which implements an elastic material model solved using the Finite Element Method on tetrahedrons.
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large',
poissonRatio=0.45,
youngModulus=600)
# Fix the base of the trunk by adding constraints in a region of interest (ROI)
self.robot.addObject('BoxROI', name='boxROI', box=[[-15, 0, 0], [5, 10, 15]], drawBoxes=False)
self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')
##########################################
# Cable #
##########################################
# This creates a new node in the scene. This node is appended to the finger's node.
actuators = self.robot.addChild('actuators')
cable = actuators.addChild('cable')
        # This creates a MechanicalObject, a component holding the degrees of
        # freedom of our mechanical model. In the case of a cable it is a set
        # of positions specifying the points the cable passes through.
cable.addObject('MechanicalObject', name='meca',
position=(
"-17.5 12.5 2.5 " +
"-32.5 12.5 2.5 " +
"-47.5 12.5 2.5 " +
"-62.5 12.5 2.5 " +
"-77.5 12.5 2.5 " +
"-83.5 12.5 4.5 " +
"-85.5 12.5 6.5 " +
"-85.5 12.5 8.5 " +
"-83.5 12.5 10.5 " +
"-77.5 12.5 12.5 " +
"-62.5 12.5 12.5 " +
"-47.5 12.5 12.5 " +
"-32.5 12.5 12.5 " +
"-17.5 12.5 12.5 "))
        # Create a CableConstraint object with a name.
        # The indices refer to the MechanicalObject's positions.
        # The last index is where the pullPoint is connected.
cable.addObject('CableConstraint', name="cable",
indices=list(range(14)),
pullPoint="0.0 12.5 2.5", valueType='force',
minForce=self.robot.min_force[0] * self.robot.dt.value)
        # This creates a BarycentricMapping. A BarycentricMapping is a key element, as it creates
        # a bi-directional link between the cable's DoFs and the finger's, so that movements of
        # the cable's DoFs are mapped to the finger and vice versa.
cable.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')
self.actuator_list.append(cable.cable)
self.robot.actuator_list = self.actuator_list
##########################################
# Visualization #
##########################################
# In Sofa, visualization is handled by adding a rendering model.
# Create an empty child node to store this rendering model.
fingerVisu = self.robot.addChild('VisualModel')
        # Add to this empty node a rendering model made of triangles and loaded from an STL file.
fingerVisu.addObject('MeshSTLLoader', filename=path + "/mesh/finger.stl")
fingerVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8])
        # Add a BarycentricMapping so the rendering model deforms along with the parent mechanical model.
fingerVisu.addObject('BarycentricMapping')
class Diamond(TemplateEnvironment):
def __init__(self, name='Diamond', totalMass=0.5, poissonRatio=0.45, youngModulus=450, rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01):
super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt)
self.nb_nodes = 1628
self.gravity = [0., 0., -9810.]
rotation = [90, 0.0, 0.0]
translation = [0.0, 0.0, 35]
self.robot.min_force = [0, 0, 0, 0] # Without premultiplication with dt
self.robot.addObject('MeshVTKLoader', name='loader', filename=path + "/mesh/diamond.vtu", rotation=rotation,
translation=translation)
self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')
self.robot.addObject('TetrahedronSetTopologyModifier')
self.robot.addObject('TetrahedronSetTopologyAlgorithms')
self.robot.addObject('TetrahedronSetGeometryAlgorithms')
self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras', showIndices='false',
showIndicesScale='4e-5')
self.robot.addObject('UniformMass', totalMass=totalMass, name='mass')
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d',
method='large', name='forcefield',
poissonRatio=poissonRatio, youngModulus=youngModulus)
        # Fix the base of the diamond by adding constraints in a region of interest (ROI)
self.robot.addObject('BoxROI', name='boxROI', box=[-15, -15, -40, 15, 15, 10], drawBoxes=True)
self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')
##########################################
# Cable #
##########################################
self.actuatorsParam = [
{'withName': 'A',
'withCableGeometry': [[0, 97, 45]],
'withAPullPointLocation': [0, 10, 30]
},
{'withName': 'B',
'withCableGeometry': [[-97, 0, 45]],
'withAPullPointLocation': [-10, 0, 30]
},
{'withName': 'C',
'withCableGeometry': [[0, -97, 45]],
'withAPullPointLocation': [0, -10, 30]
},
{'withName': 'D',
'withCableGeometry': [[97, 0, 45]],
'withAPullPointLocation': [10, 0, 30]
}
]
actuators = self.robot.addChild('actuators')
for i in range(len(self.actuatorsParam)):
cable = actuators.addChild(self.actuatorsParam[i]['withName'])
cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry'])
cable.addObject('CableConstraint',
name='cable',
indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))),
pullPoint=self.actuatorsParam[i]['withAPullPointLocation'],
valueType='force',
hasPullPoint=True,
minForce=self.robot.min_force[i] * self.robot.dt.value
)
cable.addObject('BarycentricMapping', name="Mapping", mapForces=False, mapMasses=False)
self.actuator_list.append(cable.cable)
self.robot.actuator_list = self.actuator_list
##########################################
# Visualization #
##########################################
diamondVisu = self.robot.addChild('VisualModel')
diamondVisu.addObject('MeshSTLLoader', filename=path + "/mesh/diamond.stl")
diamondVisu.addObject('OglModel', template='Vec3d', color=[0.7, 0.7, 0.7, 0.7], updateNormals=False)
diamondVisu.addObject('BarycentricMapping')
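# Example usage (hypothetical sketch; assumes a SofaPython3 runtime and that
# TemplateEnvironment, defined earlier in this file, initializes an empty
# actuator_list):
# >>> env = Diamond(totalMass=0.5, youngModulus=450)
# >>> len(env.actuator_list)   # one CableConstraint per corner cable A-D
# 4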
avg_line_length: 49.83121 | max_line_length: 148 | alphanum_fraction: 0.547964 | [remaining flattened quality-signal columns omitted]
hexsha: dbe3f5f2703d36ffae51f8561d55eb622bc98049 | size: 21,019 | ext: py | lang: Python
path: generate_training_data_drb.py | repo: SimonTopp/Graph-WaveNet | head_hexsha: ef63a80cc397744667a5d27f7c410c10e3e03a4c | licenses: ["MIT"] | stars: null | issues: null | forks: null
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import datetime
import os
import os.path
import pickle
import numpy as np
import pandas as pd
import xarray as xr
import yaml
import util
def scale(dataset, std=None, mean=None):
"""
scale the data so it has a standard deviation of 1 and a mean of zero
:param dataset: [xr dataset] input or output data
:param std: [xr dataset] standard deviation if scaling test data with dims
:param mean: [xr dataset] mean if scaling test data with dims
:return: scaled data with original dims
"""
if not isinstance(std, xr.Dataset) or not isinstance(mean, xr.Dataset):
std = dataset.std(skipna=True)
mean = dataset.mean(skipna=True)
# adding small number in case there is a std of zero
scaled = (dataset - mean) / (std + 1e-10)
check_if_finite(std)
check_if_finite(mean)
return scaled, std, mean
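# Example (hypothetical data): fit scaling statistics on one dataset and reuse
# them on a held-out split so both share the same normalization.
# >>> ds = xr.Dataset({"t": ("date", np.arange(4.0))})
# >>> scaled, std, mean = scale(ds)
# >>> tst_scaled, _, _ = scale(ds, std=std, mean=mean)  # reuse train stats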
def sel_partition_data(dataset, start_dates, end_dates):
"""
select the data from a date range or a set of date ranges
:param dataset: [xr dataset] input or output data with date dimension
    :param start_dates: [str or list] fmt: "YYYY-MM-DD"; date(s) to start period
        (can have multiple discontinuous periods)
    :param end_dates: [str or list] fmt: "YYYY-MM-DD"; date(s) to end period
        (can have multiple discontinuous periods)
:return: dataset of just those dates
"""
    # if it's just one date range
if isinstance(start_dates, str):
if isinstance(end_dates, str):
return dataset.sel(date=slice(start_dates, end_dates))
else:
raise ValueError("start_dates is str but not end_date")
# if it's a list of date ranges
    elif isinstance(start_dates, (list, tuple)):
if len(start_dates) == len(end_dates):
data_list = []
for i in range(len(start_dates)):
date_slice = slice(start_dates[i], end_dates[i])
data_list.append(dataset.sel(date=date_slice))
return xr.concat(data_list, dim="date")
else:
raise ValueError("start_dates and end_dates must have same length")
else:
raise ValueError("start_dates must be either str, list, or tuple")
def separate_trn_tst(
dataset,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
):
"""
separate the train data from the test data according to the start and end
dates. This assumes your training data is in one continuous block and all
the dates that are not in the training are in the testing.
:param dataset: [xr dataset] input or output data with dims
:param train_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
train period (can have multiple discontinuos periods)
:param train_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end train
period (can have multiple discontinuos periods)
:param val_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
validation period (can have multiple discontinuos periods)
:param val_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end
validation period (can have multiple discontinuos periods)
:param test_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
test period (can have multiple discontinuos periods)
:param test_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end test
period (can have multiple discontinuos periods)
"""
train = sel_partition_data(dataset, train_start_date, train_end_date)
val = sel_partition_data(dataset, val_start_date, val_end_date)
test = sel_partition_data(dataset, test_start_date, test_end_date)
return train, val, test
def split_into_batches(data_array, seq_len=365, offset=1):
"""
    split training data into batches of length seq_len
:param data_array: [numpy array] array of training data with dims [nseg,
ndates, nfeat]
:param seq_len: [int] length of sequences (i.e., 365)
:param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
the first batch will be 0-365 and the second will be 182-547)
:return: [numpy array] batched data with dims [nbatches, nseg, seq_len
(batch_size), nfeat]
"""
combined = []
for i in range(int(1 / offset)):
start = int(i * offset * seq_len)
idx = np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len)
split = np.split(data_array, indices_or_sections=idx, axis=1)
        # keep only full-length batches; partial leading/trailing slices are dropped
combined.extend([s for s in split if s.shape[1] == seq_len])
combined = np.asarray(combined)
return combined
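# Shape example: a data_array with dims [1 seg, 730 dates, 2 feat], seq_len=365,
# offset=0.5 yields three full windows (days 0-365 and 365-730, plus the
# half-offset window 182-547), i.e. output shape [3, 1, 365, 2].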
def read_multiple_obs(obs_files, x_data):
"""
read and format multiple observation files. we read in the pretrain data to
make sure we have the same indexing.
:param obs_files: [list] list of filenames of observation files
:param pre_train_file: [str] the file of pre_training data
:return: [xr dataset] the observations in the same time
"""
obs = [x_data.sortby(["seg_id_nat", "date"])]
for filename in obs_files:
ds = xr.open_zarr(filename)
obs.append(ds)
if "site_id" in ds.variables:
del ds["site_id"]
obs = xr.merge(obs, join="left")
obs = obs[["temp_c", "discharge_cms"]]
obs = obs.rename(
{"temp_c": "seg_tave_water", "discharge_cms": "seg_outflow"}
)
return obs
def reshape_for_training(data):
"""
reshape the data for training
:param data: training data (either x or y or mask) dims: [nbatch, nseg,
len_seq, nfeat/nout]
:return: reshaped data [nbatch * nseg, len_seq, nfeat/nout]
"""
n_batch, n_seg, seq_len, n_feat = data.shape
return np.reshape(data, [n_batch * n_seg, seq_len, n_feat])
def get_exclude_start_end(exclude_grp):
"""
get the start and end dates for the exclude group
:param exclude_grp: [dict] dictionary representing the exclude group from
the exclude yml file
:return: [tuple of datetime objects] start date, end date
"""
start = exclude_grp.get("start_date")
if start:
start = datetime.datetime.strptime(start, "%Y-%m-%d")
end = exclude_grp.get("end_date")
if end:
end = datetime.datetime.strptime(end, "%Y-%m-%d")
return start, end
def convert_batch_reshape(dataset, seq_len=365, offset=1, y=False, period=np.nan):
"""
convert xarray dataset into numpy array, swap the axes, batch the array and
reshape for training
    :param dataset: [xr dataset] data to be batched
    :param seq_len: [int] length of sequences (i.e., 365)
    :param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
        the first batch will be 0-365 and the second will be 182-547)
    :param y: [bool] whether this is target (y) data
    :param period: [int or np.nan] if y is True and period is finite, keep only
        the last `period` timesteps of each sequence
"""
# convert xr.dataset to numpy array
dataset = dataset.transpose("seg_id_nat", "date")
arr = dataset.to_array().values
# if the dataset is empty, just return it as is
if dataset.date.size == 0:
return arr
# before [nfeat, nseg, ndates]; after [nseg, ndates, nfeat]
# this is the order that the split into batches expects
arr = np.moveaxis(arr, 0, -1)
# batch the data
# after [nbatch, nseg, seq_len, nfeat]
batched = split_into_batches(arr, seq_len=seq_len, offset=offset)
# reshape data
# after [nseq, seq_len, nseg, nfeat]
#reshaped = reshape_for_training(batched)
reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3])
if y & np.isfinite(period):
reshaped = reshaped[:,-period:,...]
return reshaped
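# Axis bookkeeping (sketch): to_array gives [nfeat, nseg, ndates]; moveaxis
# gives [nseg, ndates, nfeat]; split_into_batches gives
# [nbatch, nseg, seq_len, nfeat]; the final moveaxis swaps to
# [nbatch, seq_len, nseg, nfeat].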
def coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1):
# I need one variable name. It can be any in the dataset, but I'll use the
# first
first_var = next(iter(dataset.data_vars.keys()))
coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0]
new_var_name = coord_name + "1"
dataset[new_var_name] = coord_array
reshaped_np_arr = convert_batch_reshape(
dataset[[new_var_name]], seq_len=seq_len, offset=offset
)
return reshaped_np_arr
def check_if_finite(xarr):
assert np.isfinite(xarr.to_array().values).all()
def prep_data(
obs_temper_file,
obs_flow_file,
pretrain_file,
#distfile,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
x_vars=None,
    y_vars=["seg_tave_water", "seg_outflow"],
    seq_length=365,
    offset=1,
    period=None,
primary_variable="temp",
#catch_prop_file=None,
#exclude_file=None,
#log_q=False,
out_file=None,
#segs=None,
normalize_y=False,
):
"""
prepare input and output data for DL model training read in and process
data into training and testing datasets. the training and testing data are
scaled to have a std of 1 and a mean of zero
:param obs_temper_file: [str] temperature observations file (csv)
:param obs_flow_file:[str] discharge observations file (csv)
:param pretrain_file: [str] the file with the pretraining data (SNTemp data)
    :param distfile: [str] path to the distance matrix .npz file (unused in
        this version; see prep_adj_matrix)
:param train_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
train period (can have multiple discontinuos periods)
:param train_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end train
period (can have multiple discontinuos periods)
:param val_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
validation period (can have multiple discontinuos periods)
:param val_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end
validation period (can have multiple discontinuos periods)
:param test_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
test period (can have multiple discontinuos periods)
:param test_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end test
period (can have multiple discontinuos periods)
:param x_vars: [list] variables that should be used as input. If None, all
of the variables will be used
:param primary_variable: [str] which variable the model should focus on
'temp' or 'flow'. This determines the order of the variables.
    :param catch_prop_file: [str] the path to the catchment properties file
        (unused in this version)
    :param exclude_file: [str] path to exclude file (unused in this version)
    :param log_q: [bool] whether or not to take the log of discharge in
        training (unused in this version)
:param out_file: [str] file to where the values will be written
:returns: training and testing data along with the means and standard
deviations of the training input and output data
'y_trn_pre': batched, scaled, and centered output data for entire
period of record of SNTemp [n_samples, seq_len, n_out]
'y_obs_trn': batched, scaled, and centered output observation data
for the training period
'y_trn_obs_std': standard deviation of the y observations training
data [n_out]
'y_trn_obs_mean': mean of the observation training data [n_out]
'y_obs_tst': un-batched, unscaled, uncentered observation data for
the test period [n_yrs, n_seg, len_seq, n_out]
        'dates_ids_trn': batched dates and national seg ids for training data
            [n_samples, seq_len, 2]
        'dates_ids_tst': un-batched dates and national seg ids for testing
            data [n_yrs, n_seg, len_seq, 2]
"""
ds_pre = xr.open_zarr(pretrain_file)
x_data = ds_pre[x_vars]
# make sure we don't have any weird input values
check_if_finite(x_data)
x_trn, x_val, x_tst = separate_trn_tst(
x_data,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
x_scl, x_std, x_mean = scale(x_data)
x_trn_scl, _, _ = scale(x_trn, std=x_std, mean=x_mean)
x_val_scl, _, _ = scale(x_val, std=x_std, mean=x_mean)
x_tst_scl, _, _ = scale(x_tst, std=x_std, mean=x_mean)
y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data)
y_obs = y_obs[y_vars]
y_pre = ds_pre[y_vars]
y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst(
y_obs,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst(
y_pre,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
if normalize_y:
# scale y training data and get the mean and std
y_obs_trn, y_std, y_mean = scale(y_obs_trn)
y_pre_trn, _, _ = scale(y_pre_trn, y_std, y_mean)
else:
_, y_std, y_mean = scale(y_obs_trn)
data = {
"x_train": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length),
"x_val": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length),
"x_test": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length),
"x_std": x_std.to_array().values,
"x_mean": x_mean.to_array().values,
"x_cols": np.array(x_vars),
"ids_train": coord_as_reshaped_array(x_trn, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_train": coord_as_reshaped_array(x_trn, "date", offset=offset, seq_len=seq_length),
"ids_val": coord_as_reshaped_array(x_val, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_val": coord_as_reshaped_array(x_val, "date", offset=offset, seq_len=seq_length),
"ids_test": coord_as_reshaped_array(x_tst, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_test": coord_as_reshaped_array(x_tst, "date", offset=offset, seq_len=seq_length),
"y_pre_train": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True, period=period),
"y_train": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period),
"y_val": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period),
"y_test": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period),
"y_vars": np.array(y_vars),
'period': np.array([period]),
'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period),
'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period),
"y_std": y_std.to_array().values,
"y_mean": y_mean.to_array().values,
}
if out_file:
        if not os.path.isdir(out_file):
os.makedirs(out_file)
'''
np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),
x=data['x_train'],
y=data['y_pre_train'])
np.savez_compressed(os.path.join(out_file,'train.npz'),
x=data['x_train'],
y=data['y_obs_train'],
)
np.savez_compressed(os.path.join(out_file, 'test.npz'),
x=data['x_test'],
y=data['y_obs_tst'],
)
np.savez_compressed(os.path.join(out_file,'val.npz'),
x=data['x_val'],
y=data['y_obs_val'],
)
'''
np.savez_compressed(os.path.join(out_file,'data.npz'), **data)
return data
def prep_adj_matrix(infile, dist_type, out_file=None):
"""
process adj matrix.
**The resulting matrix is sorted by seg_id_nat **
:param infile:
:param dist_type: [str] type of distance matrix ("upstream", "downstream" or
"updown")
:param out_file:
:return: [numpy array] processed adjacency matrix
"""
adj_matrices = np.load(infile)
adj = adj_matrices[dist_type]
adj_full = sort_dist_matrix(adj, adj_matrices["rowcolnames"])
adj = adj_full[2]
adj = np.where(np.isinf(adj), 0, adj)
adj = -adj
mean_adj = np.mean(adj[adj != 0])
std_adj = np.std(adj[adj != 0])
adj[adj != 0] = adj[adj != 0] - mean_adj
adj[adj != 0] = adj[adj != 0] / std_adj
adj[adj != 0] = 1 / (1 + np.exp(-adj[adj != 0]))
I = np.eye(adj.shape[0])
A_hat = adj.copy() + I
D = np.sum(A_hat, axis=1)
D_inv = D ** -1.0
D_inv = np.diag(D_inv)
A_hat = np.matmul(D_inv, A_hat)
if out_file:
out_dm = [adj_full[0], adj_full[1], A_hat]
with open(out_file+'.pkl', 'wb') as f:
pickle.dump(out_dm, f, protocol=2)
return adj_full[0], adj_full[1], A_hat
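# Normalization sketch: after negating the distances, standardizing, and
# applying the elementwise sigmoid, A_hat = D^-1 (A + I) is a row-normalized
# (random-walk) adjacency. E.g. A = [[0, .8], [.6, 0]] gives
# A + I = [[1, .8], [.6, 1]], row sums D = [1.8, 1.6], and
# D^-1 (A + I) = [[.556, .444], [.375, .625]] (each row sums to 1).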
def sort_dist_matrix(mat, row_col_names):
"""
sort the distance matrix by seg_id_nat
:return:
"""
df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names)
df = df.sort_index(axis=0)
df = df.sort_index(axis=1)
sensor_id_to_ind = {}
for i, sensor_id in enumerate(df.columns):
sensor_id_to_ind[sensor_id] = i
return row_col_names, sensor_id_to_ind, df
#check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx')
#if __name__ == "__main__":
check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full',
obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full',
pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output',
train_start_date=['1985-10-01', '2016-10-01'],
train_end_date=['2006-09-30', '2020-09-30'],
val_start_date='2006-10-01',
val_end_date='2016-09-30',
test_start_date=['1980-10-01', '2020-10-01'],
test_end_date=['1985-09-30', '2021-09-30'],
x_vars=["seg_rain", "seg_tave_air", "seginc_swrad", "seg_length", "seginc_potet", "seg_slope", "seg_humid",
"seg_elev"],
y_vars=['seg_tave_water'],
primary_variable='temp',
seq_length=365,
period=np.nan,
offset=1,
out_file = 'data/DRB_gwn_full')
'''if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, default="data/METR-LA", help="Output directory.")
parser.add_argument("--traffic_df_filename", type=str, default="data/metr-la.h5", help="Raw traffic readings.",)
parser.add_argument("--seq_length_x", type=int, default=12, help="Sequence Length.",)
parser.add_argument("--seq_length_y", type=int, default=12, help="Sequence Length.",)
parser.add_argument("--y_start", type=int, default=1, help="Y pred start", )
parser.add_argument("--dow", action='store_true',)
args = parser.parse_args()
if os.path.exists(args.output_dir):
reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip()
        if reply[0] != 'y': exit()
else:
os.makedirs(args.output_dir)
generate_train_val_test(args)
##### Reformat our inputs to match theirs.
df = pd.read_hdf("data/metr-la.h5")
seq_length_x = 12
seq_length_y = 12
y_start = 1
LAtrain = np.load('data/METR-LA/train.npz')
LAtest = np.load('data/METR-LA/test.npz')
LAval = np.load('data/METR-LA/val.npz')
LAtrain['x'].shape
LAtrain['y'].shape
LAtest['x'].shape
LAtest['y'].shape
check = np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3])
np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),
x=data['x_train'],
y=data['y_pre_train'])
np.savez_compressed(os.path.join(out_file,'train.npz'),
x=data['x_train'],
y=data['y_pre_train'],
)
np.savez_compressed(os.path.join(out_file, 'test.npz'),
x=data['x_test'],
y=data['y_pre_test'],
)
np.savez_compressed(os.path.join(out_file,'val.npz'),
x=data['x_val'],
y=data['y_pre_val'],
)
'''
avg_line_length: 39.658491 | max_line_length: 120 | alphanum_fraction: 0.63709 | [remaining flattened quality-signal columns omitted]
hexsha: dbe6b1bbfa7c8868231f9a2e70cb8975c45626ee | size: 434 | ext: py | lang: Python
path: cs101/module8/8-1/chroma1.py | repo: idsdlab/basicai_sp21 | head_hexsha: af9acba34c0417fed830de1b61753c50fd303169 | licenses: ["MIT"] | stars: 1 (2021-03-23T16:18:00.000Z) | issues: null | forks: null
from cs1media import *
import math
def dist(c1, c2):
r1, g1, b1 = c1
r2, g2, b2 = c2
return math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2)
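# Worked example: dist((0, 0, 0), (255, 255, 255)) = sqrt(3 * 255**2) ~ 441.7
# is the largest possible RGB distance, so a threshold like 70 selects only a
# tight neighborhood around the key color.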
def chroma(img, key, threshold):
w, h = img.size()
for y in range(h):
for x in range(w):
p = img.get(x, y)
if dist(p, key) < threshold:
img.set(x, y, Color.yellow)
statue = load_picture("photos/statue1.jpg")
chroma(statue, (41, 75, 146), 70)
statue.show()
avg_line_length: 20.666667 | max_line_length: 56 | alphanum_fraction: 0.582949 | [remaining flattened quality-signal columns omitted]
hexsha: dbe72fbf88b8bf3f7bd1a038ff09959ccc113054 | size: 3,433 | ext: py | lang: Python
path: wfirst_stars/mklc.py | repo: RuthAngus/wfirst_stars | head_hexsha: 60989fc56488ac915082e76c3088c6133909985b | licenses: ["MIT"] | stars: null | issues: null | forks: null
import numpy
import scipy
import scipy.io
def mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0):
    '''This is a simplified version of the class-based routines in
    spot_model.py. It generates a light curve for dark, point-like
    spots with no limb-darkening.
    Parameters:
    nspot = desired number of spots present on star at any
        one time
    amp = desired light curve amplitude
    tau = characteristic spot life-time
    diffrot = fractional difference between equatorial and polar
        rotation period
    (unit of time is equatorial rotation period)'''
    diffrot = 0.
# print('Period = ', p)
dur = (max(t) - min(t))
# (crude estimate of) total number of spots needed during entire
# time-series
nspot_tot = int(nspot * dur / 2 / tau)
# uniform distribution of spot longitudes
lon = scipy.rand(nspot_tot) * 2 * scipy.pi
# distribution of spot latitudes uniform in sin(latitude)
lat = scipy.arcsin(scipy.rand(nspot_tot))
# spot rotation rate optionally depends on latitude
period = ((scipy.sin(lat) - 0.5) * diffrot + 1.0 ) * p
period0 = scipy.ones(nspot_tot) * p
# all spots have the same maximum area
# (crude estimate of) filling factor needed per spot
ff = amp / scipy.sqrt(nspot)
scale_fac = 1
amax = scipy.ones(nspot_tot) * ff * scale_fac
# all spots have the evolution timescale
decay = scipy.ones(nspot_tot) * tau
# uniform distribution of spot peak times
# start well before and end well after time-series limits (to
# avoid edge effects)
extra = 3 * decay.max()
pk = scipy.rand(nspot_tot) * (dur + 2 * extra) - extra
# COMPUTE THE LIGHT CURVE
# print("Computing light curve...")
time = numpy.array(t - min(t))
area_tot = scipy.zeros_like(time)
dF_tot = scipy.zeros_like(time)
dF_tot0 = scipy.zeros_like(time)
# add up the contributions of individual spots
for i in range(nspot_tot):
# Spot area
        if (pk[i] == 0) or (decay[i] == 0):
area = scipy.ones_like(time) * amax[i]
else:
area = amax[i] * \
scipy.exp(-(time - pk[i])**2 / 2. / decay[i]**2)
area_tot += area
# Fore-shortening
phase = 2 * scipy.pi * time / period[i] + lon[i]
phase0 = 2 * scipy.pi * time / period0[i] + lon[i]
mu = scipy.cos(incl) * scipy.sin(lat[i]) + \
scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase)
mu0 = scipy.cos(incl) * scipy.sin(lat[i]) + \
scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0)
mu[mu < 0] = 0.0
mu0[mu0 < 0] = 0.0
# Flux
dF_tot -= area * mu
dF_tot0 -= area * mu0
amp_eff = dF_tot.max()-dF_tot.min()
nspot_eff = area_tot / scale_fac / ff
res0 = scipy.array([nspot_eff.mean(), ff, amp_eff])
res1 = scipy.zeros((4, len(time)))
res1[0,:] = time
res1[1,:] = area_tot
res1[2,:] = dF_tot
res1[3,:] = dF_tot0
# print('Used %d spots in total over %d rotation periods.' % (nspot_tot, dur))
# print('Mean filling factor of individual spots was %.4f.' % ff)
# print('Desired amplitude was %.4f, actual amplitude was %.4f.' \
# % (amp, amp_eff))
# print('Desired number of spots at any one time was %d.' % nspot)
return res0, res1
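# Example usage (hypothetical cadence): one year of daily observations.
# >>> import numpy as np
# >>> t = np.arange(0., 365., 1.)
# >>> res0, res1 = mklc(t, nspot=100, amp=0.01, tau=30.5, p=10.0)
# >>> time, area_tot, dF, dF0 = res1  # res0 = [mean nspot_eff, ff, amp_eff]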
avg_line_length: 31.787037 | max_line_length: 82 | alphanum_fraction: 0.605884 | [remaining flattened quality-signal columns omitted]
hexsha: dbe8f2379002738c1c16e7f2d3cd857e1c75e38f | size: 10,561 | ext: py | lang: Python
path: davan/http/service/telldus/tdtool.py | repo: davandev/davanserver | head_hexsha: 0be914268c8e34d4092251508bae213cff3ef621 | licenses: ["MIT"] | stars: null | issues: null | forks: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, getopt, httplib, urllib, json, os
import oauth.oauth as oauth
import datetime
from configobj import ConfigObj
import logging
global logger
logger = logging.getLogger(os.path.basename(__file__))
import davan.util.application_logger as log_manager
#insert your own public_key and private_key
import davan.config.config_creator as config_creator
configuration = config_creator.create()
PUBLIC_KEY = configuration["TELLDUS_PUBLIC_KEY"]
PRIVATE_KEY = configuration["TELLDUS_PRIVATE_KEY"]
TELLSTICK_TURNON = 1
TELLSTICK_TURNOFF = 2
TELLSTICK_BELL = 4
TELLSTICK_DIM = 16
TELLSTICK_UP = 128
TELLSTICK_DOWN = 256
SUPPORTED_METHODS = TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_BELL | TELLSTICK_DIM | TELLSTICK_UP | TELLSTICK_DOWN
def printUsage():
print("Usage: %s [ options ]" % sys.argv[0])
print("")
print("Options:")
print(" -[lnfdbvh] [ --list ] [ --help ]")
print(" [ --on device ] [ --off device ] [ --bell device ]")
print(" [ --dimlevel level --dim device ]")
print(" [ --up device --down device ]")
print("")
print(" --list (-l short option)")
print(" List currently configured devices.")
print("")
print(" --help (-h short option)")
print(" Shows this screen.")
print("")
print(" --on device (-n short option)")
print(" Turns on device. 'device' must be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print("")
print(" --off device (-f short option)")
print(" Turns off device. 'device' must be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print("")
print(" --dim device (-d short option)")
print(" Dims device. 'device' must be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print(" Note: The dimlevel parameter must be set before using this option.")
print("")
print(" --dimlevel level (-v short option)")
print(" Set dim level. 'level' should an integer, 0-255.")
print(" Note: This parameter must be set before using dim.")
print("")
print(" --bell device (-b short option)")
print(" Sends bell command to devices supporting this. 'device' must")
print(" be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print("")
print(" --up device")
print(" Sends up command to devices supporting this. 'device' must")
print(" be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print("")
print(" --down device")
print(" Sends down command to devices supporting this. 'device' must")
print(" be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print("")
print(" --list-sensors (-s short option)")
print(" Lists currently configured sensors")
print("")
print(" --sensor-data sensor (-d short option)")
print(" Get sensor data with sensor id number")
print("")
print("Report bugs to <info.tech@telldus.se>")
def listSensors():
    response = doRequest('sensors/list', {'includeIgnored': 1})
    logger.debug("Number of sensors: %i" % len(response['sensor']))
    for sensor in response['sensor']:
        lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated']))
        logger.debug("%s\t%s\t%s" % (sensor['id'], sensor['name'], lastupdate))
def listSensorsAndValues():
    response = doRequest('sensors/list', {'includeValues': 1})
return response
def listDevicesAndValues():
response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS})
return response
def getSensorData(sensorId):
    response = doRequest('sensor/info', {'id': sensorId})
    lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated']))
    sensor_name = response['name']
    for data in response['data']:
        logger.debug("%s\t%s\t%s\t%s" % (sensor_name, data['name'], data['value'], lastupdate))
def listDevices():
response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS})
logger.debug("Number of devices: %i" % len(response['device']));
for device in response['device']:
if (device['state'] == TELLSTICK_TURNON):
state = 'ON'
elif (device['state'] == TELLSTICK_TURNOFF):
state = 'OFF'
elif (device['state'] == TELLSTICK_DIM):
state = "DIMMED"
elif (device['state'] == TELLSTICK_UP):
state = "UP"
elif (device['state'] == TELLSTICK_DOWN):
state = "DOWN"
else:
state = 'Unknown state'
logger.debug("%s\t%s\t%s" % (device['id'], device['name'], state));
def doMethod(deviceId, methodId, methodValue = 0):
response = doRequest('device/info', {'id': deviceId})
if (methodId == TELLSTICK_TURNON):
method = 'on'
elif (methodId == TELLSTICK_TURNOFF):
method = 'off'
elif (methodId == TELLSTICK_BELL):
method = 'bell'
elif (methodId == TELLSTICK_UP):
method = 'up'
elif (methodId == TELLSTICK_DOWN):
method = 'down'
if ('error' in response):
name = ''
retString = response['error']
else:
name = response['name']
response = doRequest('device/command', {'id': deviceId, 'method': methodId, 'value': methodValue})
if ('error' in response):
retString = response['error']
else:
retString = response['status']
if (methodId in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)):
logger.debug("Turning %s device %s, %s - %s" % ( method, deviceId, name, retString));
elif (methodId in (TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)):
logger.debug("Sending %s to: %s %s - %s" % (method, deviceId, name, retString))
elif (methodId == TELLSTICK_DIM):
logger.debug("Dimming device: %s %s to %s - %s" % (deviceId, name, methodValue, retString))
def doRequest(method, params):
global config
config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')
consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)
token = oauth.OAuthToken(config['token'], config['tokenSecret'])
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url="http://api.telldus.com/json/" + method, parameters=params)
oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
headers = oauth_request.to_header()
headers['Content-Type'] = 'application/x-www-form-urlencoded'
conn = httplib.HTTPConnection("api.telldus.com:80")
conn.request('GET', "/json/" + method + "?" + urllib.urlencode(params, True).replace('+', '%20'), headers=headers)
response = conn.getresponse()
try:
return json.load(response)
    except Exception:
        logger.debug('Failed to decode response :%s' % str(response))
        return ""
def requestToken():
global config
consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)
request = oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken')
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None)
conn = httplib.HTTPConnection('api.telldus.com:80')
conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header())
resp = conn.getresponse().read()
token = oauth.OAuthToken.from_string(resp)
    logger.debug('Open the following url in your web browser:\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\n' % token.key)
logger.debug( 'After logging in and accepting to use this application run:\n%s --authenticate' % (sys.argv[0]))
config['requestToken'] = str(token.key)
config['requestTokenSecret'] = str(token.secret)
saveConfig()
def getAccessToken():
global config
consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)
token = oauth.OAuthToken(config['requestToken'], config['requestTokenSecret'])
request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken')
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
conn = httplib.HTTPConnection('api.telldus.com:80')
conn.request(request.http_method, request.to_url(), headers=request.to_header())
resp = conn.getresponse()
if resp.status != 200:
        logger.debug('Error retrieving access token, the server replied:\n%s' % resp.read())
return
token = oauth.OAuthToken.from_string(resp.read())
config['requestToken'] = None
config['requestTokenSecret'] = None
config['token'] = str(token.key)
config['tokenSecret'] = str(token.secret)
logger.debug( 'Authentication successful, you can now use tdtool')
saveConfig()
def authenticate():
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['authenticate'])
for opt, arg in opts:
            if opt == '--authenticate':
getAccessToken()
return
except getopt.GetoptError:
pass
requestToken()
def saveConfig():
global config
try:
os.makedirs(os.environ['HOME'] + '/.config/Telldus')
    except OSError:
        pass
config.write()
def main(argv):
global config
if ('token' not in config or config['token'] == ''):
authenticate()
return
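    # NOTE: the short option 'd' appears twice in the getopt spec below (for
    # --sensor-data and --dim); because "--sensor-data" is matched first in
    # the loop, "-d" always triggers sensor-data -- use the long "--dim".
    # Likewise "-x"/"--list-sensorsvalue" is handled below but never
    # registered with getopt, so that branch is unreachable as written.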
try:
opts, args = getopt.getopt(argv, "lsd:n:f:d:b:v:h", ["list", "list-sensors", "sensor-data=", "on=", "off=", "dim=", "bell=", "dimlevel=", "up=", "down=", "help"])
except getopt.GetoptError:
printUsage()
sys.exit(2)
dimlevel = -1
for opt, arg in opts:
if opt in ("-h", "--help"):
printUsage()
elif opt in ("-l", "--list"):
listDevices()
elif opt in ("-s", "--list-sensors"):
listSensors()
elif opt in ("-x", "--list-sensorsvalue"):
listSensorsAndValues()
elif opt in ("-d", "--sensor-data"):
getSensorData(arg)
elif opt in ("-n", "--on"):
doMethod(arg, TELLSTICK_TURNON)
elif opt in ("-f", "--off"):
doMethod(arg, TELLSTICK_TURNOFF)
elif opt in ("-b", "--bell"):
doMethod(arg, TELLSTICK_BELL)
elif opt in ("-d", "--dim"):
if (dimlevel < 0):
logger.debug("Dimlevel must be set with --dimlevel before --dim")
else:
doMethod(arg, TELLSTICK_DIM, dimlevel)
elif opt in ("-v", "--dimlevel"):
dimlevel = arg
elif opt in ("--up"):
doMethod(arg, TELLSTICK_UP)
elif opt in ("--down"):
doMethod(arg, TELLSTICK_DOWN)
if __name__ == "__main__":
config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')
configuration = config_creator.create()
log_manager.start_logging(configuration["LOGFILE_PATH"],loglevel=4)
main(sys.argv[1:])
avg_line_length: 35.921769 | max_line_length: 170 | alphanum_fraction: 0.670675 | [remaining flattened quality-signal columns omitted]
hexsha: dbe91e2d448902d1659cd842f7d5834596d34306 | size: 16,286 | ext: py | lang: Python
path: ichnaea/data/export.py | repo: rajreet/ichnaea | head_hexsha: 7bd2eaa9568f9004e566b802623299625c29f5ae | licenses: ["Apache-2.0"] | stars: 348 (2015-01-13T11:48:07.000Z - 2022-03-31T08:33:07.000Z) | issues: 1,274 (2015-01-02T18:15:56.000Z - 2022-03-23T15:29:08.000Z) | forks: 149 (2015-01-04T21:15:07.000Z - 2021-12-10T06:05:09.000Z)
from collections import defaultdict
import json
import re
import time
from urllib.parse import urlparse
import uuid
import boto3
import boto3.exceptions
import botocore.exceptions
import markus
import redis.exceptions
import requests
import requests.exceptions
from sqlalchemy import select
import sqlalchemy.exc
from ichnaea.data import _map_content_enabled
from ichnaea.models import (
ApiKey,
BlueObservation,
BlueReport,
BlueShard,
CellObservation,
CellReport,
CellShard,
DataMap,
ExportConfig,
Report,
WifiObservation,
WifiReport,
WifiShard,
)
from ichnaea.models.content import encode_datamap_grid
from ichnaea import util
WHITESPACE = re.compile(r"\s", flags=re.UNICODE)
METRICS = markus.get_metrics()
class IncomingQueue(object):
"""
The incoming queue contains the data collected in the web application. It
is the single entrypoint from which all other data pipelines get their
data.
It distributes the data into the configured export queues, checks those
queues and if they contain enough or old enough data schedules an async
export task to process the data in each queue.
"""
def __init__(self, task):
self.task = task
def __call__(self, export_task):
redis_client = self.task.redis_client
data_queue = self.task.app.data_queues["update_incoming"]
data = data_queue.dequeue()
grouped = defaultdict(list)
for item in data:
grouped[(item["api_key"], item.get("source", "gnss"))].append(
{"api_key": item["api_key"], "report": item["report"]}
)
with self.task.db_session(commit=False) as session:
export_configs = ExportConfig.all(session)
with self.task.redis_pipeline() as pipe:
for (api_key, source), items in grouped.items():
for config in export_configs:
if config.allowed(api_key, source):
queue_key = config.queue_key(api_key, source)
queue = config.queue(queue_key, redis_client)
queue.enqueue(items, pipe=pipe)
for config in export_configs:
# Check all queues if they now contain enough data or
# old enough data to be ready for processing.
for queue_key in config.partitions(redis_client):
queue = config.queue(queue_key, redis_client)
if queue.ready():
export_task.delay(config.name, queue_key)
if data_queue.ready():
self.task.apply_countdown()
class ReportExporter(object):
_retriable = (IOError,)
_retries = 3
_retry_wait = 1.0
def __init__(self, task, config, queue_key):
self.task = task
self.config = config
self.queue_key = queue_key
self.queue = config.queue(queue_key, task.redis_client)
self.stats_tags = ["key:" + self.config.name]
@staticmethod
def export(task, name, queue_key):
with task.db_session(commit=False) as session:
config = ExportConfig.get(session, name)
exporter_types = {
"dummy": DummyExporter,
"geosubmit": GeosubmitExporter,
"internal": InternalExporter,
"s3": S3Exporter,
}
exporter_type = exporter_types.get(config.schema)
if exporter_type is not None:
exporter_type(task, config, queue_key)()
def __call__(self):
queue_items = self.queue.dequeue()
if not queue_items:
return
success = False
for i in range(self._retries):
try:
with METRICS.timer("data.export.upload.timing", tags=self.stats_tags):
self.send(queue_items)
success = True
except self._retriable:
success = False
time.sleep(self._retry_wait * (i ** 2 + 1))
if success:
METRICS.incr("data.export.batch", tags=self.stats_tags)
break
if success and self.queue.ready():
self.task.apply_countdown(args=[self.config.name, self.queue_key])
def send(self, queue_items):
raise NotImplementedError()
class DummyExporter(ReportExporter):
def send(self, queue_items):
pass
class GeosubmitExporter(ReportExporter):
_retriable = (IOError, requests.exceptions.RequestException)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
headers = {
"Content-Encoding": "gzip",
"Content-Type": "application/json",
"User-Agent": "ichnaea",
}
response = requests.post(
self.config.url,
data=util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=5
),
headers=headers,
timeout=60.0,
)
# log upload_status and trigger exception for bad responses
# this causes the task to be re-tried
METRICS.incr(
"data.export.upload",
tags=self.stats_tags + ["status:%s" % response.status_code],
)
response.raise_for_status()
class S3Exporter(ReportExporter):
_retriable = (
IOError,
boto3.exceptions.Boto3Error,
botocore.exceptions.BotoCoreError,
)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
_, bucketname, path = urlparse(self.config.url)[:3]
# s3 key names start without a leading slash
path = path.lstrip("/")
if not path.endswith("/"):
path += "/"
year, month, day = util.utcnow().timetuple()[:3]
# strip away queue prefix again
parts = self.queue_key.split(":")
source = parts[1]
api_key = parts[2]
obj_name = path.format(
source=source, api_key=api_key, year=year, month=month, day=day
)
obj_name += uuid.uuid1().hex + ".json.gz"
try:
data = util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=7
)
s3 = boto3.resource("s3")
bucket = s3.Bucket(bucketname)
obj = bucket.Object(obj_name)
obj.put(Body=data, ContentEncoding="gzip", ContentType="application/json")
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:success"]
)
except Exception:
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:failure"]
)
raise
class InternalTransform(object):
"""
This maps the geosubmit v2 schema used in view code and external
transfers (backup, forward to partners) to the internal submit v1
schema used in our own database models.
"""
# *_id maps a source section id to a target section id
# *_map maps fields inside the section from source to target id
# if the names are equal, a simple string can be specified instead
# of a two-tuple
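    # Example (hypothetical report): {"position": {"latitude": 51.1,
    # "longitude": 13.7}, "wifiAccessPoints": [{"macAddress": "ab:cd",
    # "signalStrength": -60}]} maps to
    # {"lat": 51.1, "lon": 13.7, "wifi": [{"mac": "ab:cd", "signal": -60}]}.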
position_id = ("position", None)
position_map = [
("latitude", "lat"),
("longitude", "lon"),
"accuracy",
"altitude",
("altitudeAccuracy", "altitude_accuracy"),
"heading",
"pressure",
"speed",
"source",
]
blue_id = ("bluetoothBeacons", "blue")
blue_map = [("macAddress", "mac"), "age", ("signalStrength", "signal")]
cell_id = ("cellTowers", "cell")
cell_map = [
("radioType", "radio"),
("mobileCountryCode", "mcc"),
("mobileNetworkCode", "mnc"),
("locationAreaCode", "lac"),
("cellId", "cid"),
"age",
"asu",
("primaryScramblingCode", "psc"),
"serving",
("signalStrength", "signal"),
("timingAdvance", "ta"),
]
wifi_id = ("wifiAccessPoints", "wifi")
wifi_map = [
("macAddress", "mac"),
"age",
"channel",
"frequency",
("radioType", "radio"),
("signalToNoiseRatio", "snr"),
("signalStrength", "signal"),
]
def _map_dict(self, item_source, field_map):
value = {}
for spec in field_map:
if isinstance(spec, tuple):
source, target = spec
else:
source = spec
target = spec
source_value = item_source.get(source)
if source_value is not None:
value[target] = source_value
return value
def _parse_dict(self, item, report, key_map, field_map):
value = {}
item_source = item.get(key_map[0])
if item_source:
value = self._map_dict(item_source, field_map)
if value:
if key_map[1] is None:
report.update(value)
else:
report[key_map[1]] = value
return value
def _parse_list(self, item, report, key_map, field_map):
values = []
for value_item in item.get(key_map[0], ()):
value = self._map_dict(value_item, field_map)
if value:
values.append(value)
if values:
report[key_map[1]] = values
return values
def __call__(self, item):
report = {}
self._parse_dict(item, report, self.position_id, self.position_map)
blues = self._parse_list(item, report, self.blue_id, self.blue_map)
cells = self._parse_list(item, report, self.cell_id, self.cell_map)
wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map)
position = item.get("position") or {}
gps_age = position.get("age", 0)
timestamp = item.get("timestamp")
if timestamp:
# turn timestamp into GPS timestamp
report["timestamp"] = timestamp - gps_age
if gps_age:
# Normalize age fields to be relative to GPS time
for type_ in ("blue", "cell", "wifi"):
for record in report.get(type_, ()):
record["age"] = record.get("age", 0) - gps_age
if blues or cells or wifis:
return report
return {}
class InternalExporter(ReportExporter):
_retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError)
transform = InternalTransform()
def send(self, queue_items):
api_keys = set()
api_keys_known = set()
metrics = {}
items = []
for item in queue_items:
# preprocess items and extract set of API keys
item["report"] = self.transform(item["report"])
if item["report"]:
items.append(item)
api_keys.add(item["api_key"])
for api_key in api_keys:
metrics[api_key] = {}
for type_ in ("report", "blue", "cell", "wifi"):
for action in ("drop", "upload"):
metrics[api_key]["%s_%s" % (type_, action)] = 0
with self.task.db_session(commit=False) as session:
# limit database session to get API keys
keys = [key for key in api_keys if key]
if keys:
columns = ApiKey.__table__.c
rows = session.execute(
select([columns.valid_key]).where(columns.valid_key.in_(keys))
).fetchall()
for row in rows:
api_keys_known.add(row.valid_key)
positions = []
observations = {"blue": [], "cell": [], "wifi": []}
for item in items:
api_key = item["api_key"]
report = item["report"]
obs, malformed_obs = self.process_report(report)
any_data = False
for name in ("blue", "cell", "wifi"):
if obs.get(name):
observations[name].extend(obs[name])
metrics[api_key][name + "_upload"] += len(obs[name])
any_data = True
metrics[api_key][name + "_drop"] += malformed_obs.get(name, 0)
metrics[api_key]["report_upload"] += 1
if any_data:
positions.append((report["lat"], report["lon"]))
else:
metrics[api_key]["report_drop"] += 1
with self.task.redis_pipeline() as pipe:
self.queue_observations(pipe, observations)
if _map_content_enabled and positions:
self.process_datamap(pipe, positions)
self.emit_metrics(api_keys_known, metrics)
def queue_observations(self, pipe, observations):
for datatype, shard_model, shard_key, queue_prefix in (
("blue", BlueShard, "mac", "update_blue_"),
("cell", CellShard, "cellid", "update_cell_"),
("wifi", WifiShard, "mac", "update_wifi_"),
):
queued_obs = defaultdict(list)
for obs in observations[datatype]:
# group by sharded queue
shard_id = shard_model.shard_id(getattr(obs, shard_key))
queue_id = queue_prefix + shard_id
queued_obs[queue_id].append(obs.to_json())
for queue_id, values in queued_obs.items():
# enqueue values for each queue
queue = self.task.app.data_queues[queue_id]
queue.enqueue(values, pipe=pipe)
def emit_metrics(self, api_keys_known, metrics):
for api_key, key_metrics in metrics.items():
api_tag = []
if api_key and api_key in api_keys_known:
api_tag = ["key:%s" % api_key]
for name, count in key_metrics.items():
if not count:
continue
type_, action = name.split("_")
if type_ == "report":
suffix = "report"
tags = api_tag
else:
suffix = "observation"
tags = ["type:%s" % type_] + api_tag
METRICS.incr("data.%s.%s" % (suffix, action), count, tags=tags)
def process_report(self, data):
report = Report.create(**data)
if report is None:
return ({}, {})
malformed = {}
observations = {}
for name, report_cls, obs_cls in (
("blue", BlueReport, BlueObservation),
("cell", CellReport, CellObservation),
("wifi", WifiReport, WifiObservation),
):
malformed[name] = 0
observations[name] = {}
if data.get(name):
for item in data[name]:
# validate the blue/cell/wifi specific fields
item_report = report_cls.create(**item)
if item_report is None:
malformed[name] += 1
continue
# combine general and specific report data into one
item_obs = obs_cls.combine(report, item_report)
item_key = item_obs.unique_key
# if we have better data for the same key, ignore
existing = observations[name].get(item_key)
if existing is not None and existing.better(item_obs):
continue
observations[name][item_key] = item_obs
obs = {
"blue": observations["blue"].values(),
"cell": observations["cell"].values(),
"wifi": observations["wifi"].values(),
}
return (obs, malformed)
def process_datamap(self, pipe, positions):
grids = set()
for lat, lon in positions:
if lat is not None and lon is not None:
grids.add(DataMap.scale(lat, lon))
shards = defaultdict(set)
for lat, lon in grids:
shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon))
for shard_id, values in shards.items():
queue = self.task.app.data_queues["update_datamap_" + shard_id]
queue.enqueue(list(values), pipe=pipe)
avg_line_length: 31.933333 | max_line_length: 86 | alphanum_fraction: 0.560666 | [remaining flattened quality-signal columns omitted]
hexsha: dbe92a131f4e410b11bc7e2f634cf6f5bfadbd7f | size: 6,636 | ext: py | lang: Python
path: test/inference_correctness/dcn_multi_hot.py | stars repo: x-y-z/HugeCTR @ 17bf942215df60827ece9dc015af5191ef9219b7 | licenses: ["Apache-2.0"] | stars: 130 (2021-10-11T11:55:28.000Z - 2022-03-31T21:53:07.000Z)
issues/forks repo: Teora/HugeCTR @ c55a63401ad350669ccfcd374aefd7a5fc879ca2 | issues: 72 (2021-10-09T04:59:09.000Z - 2022-03-31T11:27:54.000Z) | forks: 29 (2021-11-03T22:35:01.000Z - 2022-03-30T13:11:59.000Z)
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(model_name = "dcn",
max_eval_batches = 1,
batchsize_eval = 16384,
batchsize = 16384,
lr = 0.001,
vvgpu = [[0]],
repeat_dataset = True,
use_mixed_precision = False,
scaler = 1.0,
use_cuda_graph = True,
metrics_spec = {hugectr.MetricsType.AUC: 1.0})
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
source = ["./dcn_data/file_list.txt"],
eval_source = "./dcn_data/file_list_test.txt",
check_type = hugectr.Check_t.Sum,
num_workers = 16)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
update_type = hugectr.Update_t.Global,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 0.0001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
dense_dim = 13, dense_name = "dense",
data_reader_sparse_param_array =
[hugectr.DataReaderSparseParam("data1", 2, False, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 300,
embedding_vec_size = 16,
combiner = "sum",
sparse_embedding_name = "sparse_embedding1",
bottom_name = "data1",
optimizer = optimizer))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["sparse_embedding1"],
top_names = ["reshape1"],
leading_dim=416))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["reshape1", "dense"], top_names = ["concat1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["concat1"],
top_names = ["slice11", "slice12"],
ranges=[(0,429),(0,429)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross,
bottom_names = ["slice11"],
top_names = ["multicross1"],
num_layers=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["slice12"],
top_names = ["fc1"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc1"],
top_names = ["relu1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu1"],
top_names = ["dropout1"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dropout1"],
top_names = ["fc2"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc2"],
top_names = ["relu2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu2"],
top_names = ["dropout2"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["dropout2", "multicross1"],
top_names = ["concat2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat2"],
top_names = ["fc3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names = ["fc3", "label"],
top_names = ["loss"]))
model.compile()
model.summary()
model.graph_to_json(graph_config_file = "/dump_infer/dcn.json")
model.fit(max_iter = 2300, display = 200, eval_interval = 2000, snapshot = 2000, snapshot_prefix = "/dump_infer/dcn")
model.export_predictions("/dump_infer/dcn_pred_" + str(2000), "/dump_infer/dcn_label_" + str(2000))
from hugectr.inference import InferenceParams, CreateInferenceSession
import numpy as np
batch_size = 16384
num_batches = 1
data_source = "./dcn_data/file_list_test.txt"
inference_params = InferenceParams(model_name = "dcn",
max_batchsize = batch_size,
hit_rate_threshold = 1.0,
dense_model_file = "/dump_infer/dcn_dense_2000.model",
sparse_model_files = ["/dump_infer/dcn0_sparse_2000.model"],
device_id = 0,
use_gpu_embedding_cache = False,
cache_size_percentage = 1.0,
i64_input_key = False,
use_mixed_precision = False,
use_cuda_graph = True)
inference_session = CreateInferenceSession("/dump_infer/dcn.json", inference_params)
predictions = inference_session.predict(num_batches = num_batches,
source = data_source,
data_reader_type = hugectr.DataReaderType_t.Norm,
check_type = hugectr.Check_t.Sum)
ground_truth = np.loadtxt("/dump_infer/dcn_pred_2000")  # fixed typo: was `grount_truth`
diff = predictions - ground_truth
mse = np.mean(diff * diff)
if mse > 1e-3:
    # `raise` already terminates the process; the unreachable `sys.exit(1)`
    # that followed it (and lacked an `import sys`) has been dropped.
    raise RuntimeError("Too large mse between DCN multi hot inference and training: {}".format(mse))
else:
    print("DCN multi hot inference results are consistent with those during training, mse: {}".format(mse))
| 56.717949
| 117
| 0.517179
| 628
| 6,636
| 5.187898
| 0.307325
| 0.067526
| 0.069061
| 0.099754
| 0.319521
| 0.313076
| 0.297729
| 0.255985
| 0.255985
| 0.212707
| 0
| 0.038888
| 0.387734
| 6,636
| 117
| 118
| 56.717949
| 0.762983
| 0
| 0
| 0.130435
| 0
| 0
| 0.09884
| 0.032545
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034783
| 0
| 0.034783
| 0.008696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbeaa0d47dcb9a56338a2f94ede14d6545fab66f
| 4,437
|
py
|
Python
|
bindings/pydrake/systems/perception.py
|
RobotLocomotion/drake-python3.7
|
ae397a4c6985262d23e9675b9bf3927c08d027f5
|
[
"BSD-3-Clause"
] | 2
|
2021-02-25T02:01:02.000Z
|
2021-03-17T04:52:04.000Z
|
bindings/pydrake/systems/perception.py
|
RobotLocomotion/drake-python3.7
|
ae397a4c6985262d23e9675b9bf3927c08d027f5
|
[
"BSD-3-Clause"
] | null | null | null |
bindings/pydrake/systems/perception.py
|
RobotLocomotion/drake-python3.7
|
ae397a4c6985262d23e9675b9bf3927c08d027f5
|
[
"BSD-3-Clause"
] | 1
|
2021-06-13T12:05:39.000Z
|
2021-06-13T12:05:39.000Z
|
import numpy as np
from pydrake.common.value import AbstractValue
from pydrake.math import RigidTransform
from pydrake.perception import BaseField, Fields, PointCloud
from pydrake.systems.framework import LeafSystem
def _TransformPoints(points_Ci, X_CiSi):
# Make homogeneous copy of points.
points_h_Ci = np.vstack((points_Ci,
np.ones((1, points_Ci.shape[1]))))
return X_CiSi.dot(points_h_Ci)[:3, :]
def _TileColors(color, dim):
# Need manual broadcasting.
return np.tile(np.array([color]).T, (1, dim))
def _ConcatenatePointClouds(points_dict, colors_dict):
scene_points = None
scene_colors = None
for id in points_dict:
if scene_points is None:
scene_points = points_dict[id]
else:
scene_points = np.hstack((points_dict[id], scene_points))
if scene_colors is None:
scene_colors = colors_dict[id]
else:
scene_colors = np.hstack((colors_dict[id], scene_colors))
valid_indices = np.logical_not(np.isnan(scene_points))
scene_points = scene_points[:, valid_indices[0, :]]
scene_colors = scene_colors[:, valid_indices[0, :]]
return scene_points, scene_colors
class PointCloudConcatenation(LeafSystem):
"""
.. pydrake_system::
name: PointCloudConcatenation
input_ports:
- point_cloud_CiSi_id0
- X_FCi_id0
- ...
- point_cloud_CiSi_idN
- X_FCi_idN
output_ports:
- point_cloud_FS
"""
def __init__(self, id_list, default_rgb=[255., 255., 255.]):
"""
A system that takes in N point clouds of points Si in frame Ci, and N
RigidTransforms from frame Ci to F, to put each point cloud in a common
frame F. The system returns one point cloud combining all of the
transformed point clouds. Each point cloud must have XYZs. RGBs are
optional. If absent, those points will be the provided default color.
@param id_list A list containing the string IDs of all of the point
clouds. This is often the serial number of the camera they came
from, such as "1" for a simulated camera or "805212060373" for a
real camera.
@param default_rgb A list of length 3 containing the RGB values to use
in the absence of PointCloud.rgbs. Values should be between 0 and
255. The default is white.
"""
LeafSystem.__init__(self)
self._point_cloud_ports = {}
self._transform_ports = {}
self._id_list = id_list
self._default_rgb = np.array(default_rgb)
output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs)
for id in self._id_list:
self._point_cloud_ports[id] = self.DeclareAbstractInputPort(
"point_cloud_CiSi_{}".format(id),
AbstractValue.Make(PointCloud(fields=output_fields)))
self._transform_ports[id] = self.DeclareAbstractInputPort(
"X_FCi_{}".format(id),
AbstractValue.Make(RigidTransform.Identity()))
self.DeclareAbstractOutputPort("point_cloud_FS",
lambda: AbstractValue.Make(
PointCloud(fields=output_fields)),
self.DoCalcOutput)
def _AlignPointClouds(self, context):
points = {}
colors = {}
for id in self._id_list:
point_cloud = self.EvalAbstractInput(
context, self._point_cloud_ports[id].get_index()).get_value()
X_CiSi = self.EvalAbstractInput(
context, self._transform_ports[id].get_index()).get_value()
points[id] = _TransformPoints(
point_cloud.xyzs(), X_CiSi.GetAsMatrix4())
if point_cloud.has_rgbs():
colors[id] = point_cloud.rgbs()
else:
colors[id] = _TileColors(
self._default_rgb, point_cloud.xyzs().shape[1])
return _ConcatenatePointClouds(points, colors)
def DoCalcOutput(self, context, output):
scene_points, scene_colors = self._AlignPointClouds(context)
output.get_mutable_value().resize(scene_points.shape[1])
output.get_mutable_value().mutable_xyzs()[:] = scene_points
output.get_mutable_value().mutable_rgbs()[:] = scene_colors
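# --- Editor's addendum (hedged usage sketch, not in the original file) ---
# Standing the system up on its own and fixing its input ports by hand.
# The ids, cloud sizes and random contents are assumptions for illustration.
def _demo_point_cloud_concatenation():
    system = PointCloudConcatenation(id_list=["0", "1"])
    context = system.CreateDefaultContext()
    fields = Fields(BaseField.kXYZs | BaseField.kRGBs)
    for pc_id in ["0", "1"]:
        cloud = PointCloud(new_size=4, fields=fields)
        cloud.mutable_xyzs()[:] = np.random.rand(3, 4)
        cloud.mutable_rgbs()[:] = 128 * np.ones((3, 4), dtype=np.uint8)
        system.GetInputPort("point_cloud_CiSi_{}".format(pc_id)).FixValue(
            context, cloud)
        system.GetInputPort("X_FCi_{}".format(pc_id)).FixValue(
            context, RigidTransform.Identity())
    # Evaluate the single declared output port to get the merged cloud.
    return system.get_output_port(0).Eval(context)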
| 34.664063
| 79
| 0.626549
| 527
| 4,437
| 5.020873
| 0.290323
| 0.060469
| 0.024187
| 0.021542
| 0.101663
| 0.067271
| 0.037037
| 0
| 0
| 0
| 0
| 0.01201
| 0.286906
| 4,437
| 127
| 80
| 34.937008
| 0.824273
| 0.22673
| 0
| 0.074627
| 0
| 0
| 0.01255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089552
| false
| 0
| 0.074627
| 0.014925
| 0.238806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbec8e855b885f99aff4e865947ea4c6e652c177
| 2,415
|
py
|
Python
|
train.py
|
Farzin-Negahbani/PathoNet
|
b467a255fb356e64129b7942261e972ae15a2d2b
|
[
"MIT"
] | null | null | null |
train.py
|
Farzin-Negahbani/PathoNet
|
b467a255fb356e64129b7942261e972ae15a2d2b
|
[
"MIT"
] | null | null | null |
train.py
|
Farzin-Negahbani/PathoNet
|
b467a255fb356e64129b7942261e972ae15a2d2b
|
[
"MIT"
] | null | null | null |
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard
import random
import os
from datetime import datetime
import models
from utils import DataLoader, LrPolicy
from config import Config
import argparse
# Unused imports from the original (load_model, Callback, Adam, numpy,
# scipy.misc, imageio.imread, gc, json) have been dropped.
def get_parser():
parser = argparse.ArgumentParser('train')
parser.add_argument('--configPath', '-c', required=True)
return parser
def train(args=None):
parser = get_parser()
args = parser.parse_args(args)
    conf = Config()
    conf.load(args.configPath)
    time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    trainString = "%s_%s_%s_%s" % (conf.model, conf.optimizer, str(conf.lr), time)
    os.makedirs(conf.logPath + "/" + trainString)
    conf.save(conf.logPath + "/" + trainString + '/config.json')
    print('Compiling model...')
    model_checkpoint = ModelCheckpoint(
        conf.logPath + "/" + trainString + '/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5',
        monitor='val_loss', save_best_only=False, save_weights_only=True)
    change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay)
    tbCallBack = TensorBoard(log_dir=conf.logPath + "/" + trainString + '/logs',
                             histogram_freq=0, write_graph=True, write_images=True)
    model = models.modelCreator(conf.model, conf.inputShape, conf.classes, conf.pretrainedModel)
    model.compile(optimizer=conf.optimizer, loss=conf.loss)
    data = [conf.trainDataPath + "/" + f for f in os.listdir(conf.trainDataPath) if '.jpg' in f]
    random.shuffle(data)
    thr = int(len(data) * conf.validationSplit)
    trainData = data[thr:]
    valData = data[:thr]
    trainDataLoader = DataLoader(conf.batchSize, conf.inputShape, trainData, conf.guaMaxValue)
    validationDataLoader = DataLoader(conf.batchSize, conf.inputShape, valData, conf.guaMaxValue)
print('Fitting model...')
model.fit_generator(generator=trainDataLoader.generator(),
validation_data=validationDataLoader.generator(),
steps_per_epoch=len(trainData)//conf.batchSize,
validation_steps=len(valData)//conf.batchSize,
epochs=conf.epoches,
verbose=1,
initial_epoch=0,
callbacks = [model_checkpoint, change_lr,tbCallBack]
)
if __name__ == "__main__":
train()
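# --- Editor's addendum (hedged sketch, not part of the original file) ---
# The Config schema lives in config.py; inferred purely from the attributes
# referenced in train() above, a minimal config.json might look like this
# (every value below is a guess):
# {
#     "model": "PathoNet", "optimizer": "adam", "loss": "mse",
#     "lr": 0.001, "batchSize": 16, "epoches": 100,
#     "inputShape": [256, 256, 3], "classes": 3, "pretrainedModel": null,
#     "validationSplit": 0.1, "guaMaxValue": 255,
#     "trainDataPath": "data/train", "logPath": "logs"
# }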
| 41.637931
| 180
| 0.708075
| 287
| 2,415
| 5.829268
| 0.432056
| 0.0263
| 0.0526
| 0.032277
| 0.044232
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00349
| 0.169358
| 2,415
| 57
| 181
| 42.368421
| 0.830508
| 0
| 0
| 0
| 0
| 0
| 0.068737
| 0.017805
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.283019
| 0
| 0.339623
| 0.037736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbed1f6b6c1523d648a1c00ecfbe4157990ceba2
| 1,445
|
py
|
Python
|
tests/chainer_tests/functions_tests/array_tests/test_flatten.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | 7
|
2017-05-08T07:02:40.000Z
|
2018-12-02T18:35:39.000Z
|
tests/chainer_tests/functions_tests/array_tests/test_flatten.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/chainer_tests/functions_tests/array_tests/test_flatten.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 4), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestFlatten(unittest.TestCase):
dtype = numpy.float32
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.g_shape = (numpy.prod((1,) + self.shape),)
self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.flatten(x)
self.assertEqual(y.shape, self.g_shape)
self.assertEqual(y.dtype, self.dtype)
testing.assert_allclose(self.x.flatten(), y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, g_data):
gradient_check.check_backward(
functions.Flatten(), x_data, g_data, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.g))
testing.run_module(__name__, __file__)
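# --- Editor's addendum (hedged sketch, not part of the original file) ---
# What the function under test does, outside the test harness; the shape
# here is illustrative.
def _flatten_demo():
    x = numpy.arange(12, dtype=numpy.float32).reshape(3, 4)
    y = functions.flatten(chainer.Variable(x))
    assert y.shape == (12,)
    return y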
| 26.759259
| 77
| 0.680969
| 204
| 1,445
| 4.632353
| 0.245098
| 0.042328
| 0.071958
| 0.04127
| 0.080423
| 0.050794
| 0
| 0
| 0
| 0
| 0
| 0.01458
| 0.19308
| 1,445
| 53
| 78
| 27.264151
| 0.795883
| 0
| 0
| 0.052632
| 0
| 0
| 0.00692
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 1
| 0.184211
| false
| 0
| 0.210526
| 0
| 0.447368
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbed62851d59b2fa6655d17b726752f0c24c4682
| 2,773
|
py
|
Python
|
src/metarl/envs/dm_control/dm_control_env.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | 2
|
2021-02-07T12:14:52.000Z
|
2021-07-29T08:07:22.000Z
|
src/metarl/envs/dm_control/dm_control_env.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | null | null | null |
src/metarl/envs/dm_control/dm_control_env.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | null | null | null |
from dm_control import suite
from dm_control.rl.control import flatten_observation
from dm_env import StepType
import gym
import numpy as np
from metarl.envs import Step
from metarl.envs.dm_control.dm_control_viewer import DmControlViewer
class DmControlEnv(gym.Env):
"""
Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_
"""
def __init__(self, env, name=None):
self._name = name or type(env.task).__name__
self._env = env
self._viewer = None
@classmethod
def from_suite(cls, domain_name, task_name):
return cls(suite.load(domain_name, task_name),
name='{}.{}'.format(domain_name, task_name))
def step(self, action):
time_step = self._env.step(action)
return Step(
flatten_observation(time_step.observation)['observations'],
time_step.reward, time_step.step_type == StepType.LAST,
**time_step.observation)
def reset(self):
time_step = self._env.reset()
return flatten_observation(time_step.observation)['observations']
def render(self, mode='human'):
# pylint: disable=inconsistent-return-statements
if mode == 'human':
if not self._viewer:
title = 'dm_control {}'.format(self._name)
self._viewer = DmControlViewer(title=title)
self._viewer.launch(self._env)
self._viewer.render()
return None
elif mode == 'rgb_array':
return self._env.physics.render()
else:
raise NotImplementedError
def close(self):
if self._viewer:
self._viewer.close()
self._env.close()
self._viewer = None
self._env = None
    def _flat_shape(self, observation):
        # Use the builtin sum: np.sum over a generator is deprecated, and the
        # dict keys were unused in the original comprehension.
        return sum(int(np.prod(v.shape)) for v in observation.values())
@property
def action_space(self):
action_spec = self._env.action_spec()
if (len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum or
np.inf in action_spec.maximum):
return gym.spaces.Discrete(np.prod(action_spec.shape))
else:
return gym.spaces.Box(action_spec.minimum,
action_spec.maximum,
dtype=np.float32)
@property
def observation_space(self):
flat_dim = self._flat_shape(self._env.observation_spec())
return gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=[flat_dim],
dtype=np.float32)
def __getstate__(self):
d = self.__dict__.copy()
d['_viewer'] = None
return d
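# --- Editor's addendum (hedged usage sketch, not in the original file) ---
# Constructing the binding from the dm_control suite and stepping it once.
# 'cartpole'/'balance' is a standard suite task; the random action is
# illustrative.
def _demo_env():
    env = DmControlEnv.from_suite('cartpole', 'balance')
    obs = env.reset()
    step = env.step(env.action_space.sample())
    env.close()
    return obs, step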
| 33.011905
| 79
| 0.586729
| 325
| 2,773
| 4.756923
| 0.301538
| 0.045278
| 0.027167
| 0.034929
| 0.085382
| 0.063389
| 0
| 0
| 0
| 0
| 0
| 0.007322
| 0.310494
| 2,773
| 83
| 80
| 33.409639
| 0.801255
| 0.040389
| 0
| 0.121212
| 0
| 0
| 0.025709
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151515
| false
| 0
| 0.106061
| 0.030303
| 0.424242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbef871a16cf470112cb22aef95e471326a91ea8
| 1,976
|
py
|
Python
|
pype/plugins/maya/publish/validate_look_no_default_shaders.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/validate_look_no_default_shaders.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/validate_look_no_default_shaders.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
from maya import cmds
import pyblish.api
import pype.api
import pype.maya.action
class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin):
"""Validate if any node has a connection to a default shader.
This checks whether the look has any members of:
- lambert1
- initialShadingGroup
- initialParticleSE
- particleCloud1
If any of those is present it will raise an error. A look is not allowed
to have any of the "default" shaders present in a scene as they can
introduce problems when referenced (overriding local scene shaders).
To fix this no shape nodes in the look must have any of default shaders
applied.
"""
order = pype.api.ValidateContentsOrder + 0.01
families = ['look']
hosts = ['maya']
label = 'Look No Default Shaders'
actions = [pype.maya.action.SelectInvalidAction]
DEFAULT_SHADERS = {"lambert1", "initialShadingGroup",
"initialParticleSE", "particleCloud1"}
def process(self, instance):
"""Process all the nodes in the instance"""
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Invalid node relationships found: "
"{0}".format(invalid))
@classmethod
def get_invalid(cls, instance):
invalid = set()
for node in instance:
# Get shading engine connections
shaders = cmds.listConnections(node, type="shadingEngine") or []
# Check for any disallowed connections on *all* nodes
if any(s in cls.DEFAULT_SHADERS for s in shaders):
# Explicitly log each individual "wrong" connection.
for s in shaders:
if s in cls.DEFAULT_SHADERS:
cls.log.error("Node has unallowed connection to "
"'{}': {}".format(s, node))
invalid.add(node)
return list(invalid)
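# --- Editor's addendum (hedged sketch, not in the original file) ---
# The membership test above, isolated for a single node. The node name is
# made up, and cmds.listConnections requires a running Maya session.
def _demo_default_shader_check(node="pSphereShape1"):
    shaders = cmds.listConnections(node, type="shadingEngine") or []
    return [s for s in shaders
            if s in ValidateLookNoDefaultShaders.DEFAULT_SHADERS]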
| 31.365079
| 76
| 0.619433
| 225
| 1,976
| 5.417778
| 0.466667
| 0.068909
| 0.021329
| 0.09516
| 0.032814
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005818
| 0.30415
| 1,976
| 62
| 77
| 31.870968
| 0.880727
| 0.325911
| 0
| 0
| 0
| 0
| 0.141398
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.137931
| 0
| 0.482759
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbeff0d906fdca4fe34a55902305e858b8a7efb0
| 446
|
py
|
Python
|
data_science_app/app.py
|
Johne-DuChene/data_science_learning_app
|
40bafce85a27155766950806b5b32a2d1f6753c4
|
[
"MIT"
] | null | null | null |
data_science_app/app.py
|
Johne-DuChene/data_science_learning_app
|
40bafce85a27155766950806b5b32a2d1f6753c4
|
[
"MIT"
] | null | null | null |
data_science_app/app.py
|
Johne-DuChene/data_science_learning_app
|
40bafce85a27155766950806b5b32a2d1f6753c4
|
[
"MIT"
] | null | null | null |
from flask import Flask
# initialize the app
app = Flask(__name__)
# execute iris function at /iris route
@app.route("/iris")
def iris():
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
X, y = load_iris(return_X_y=True)
clf = LogisticRegression(
random_state = 42,
solver="lbfgs",
multi_class="multinomial"
).fit(X, y)
return str(clf.predict(X[:2, :]))
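# --- Editor's addendum (hedged sketch, not in the original file) ---
# Running the development server; host/port are the usual Flask defaults,
# spelled out here for illustration. GET /iris then returns the predicted
# classes for the first two samples, e.g. "[0 0]".
if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000, debug=True)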
| 24.777778
| 55
| 0.674888
| 60
| 446
| 4.833333
| 0.6
| 0.02069
| 0.103448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008571
| 0.215247
| 446
| 18
| 56
| 24.777778
| 0.82
| 0.123318
| 0
| 0
| 0
| 0
| 0.053985
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbf02afc10d2a9ad48452a7e76a2ad7a46bdd3f5
| 10,714
|
py
|
Python
|
vbdiar/scoring/normalization.py
|
VarunSrivastava19/VBDiarization
|
2a460b4fc11b3a5ff73d0534cadb182be1a9d882
|
[
"Apache-2.0"
] | 101
|
2017-12-19T21:55:59.000Z
|
2022-03-15T06:56:06.000Z
|
vbdiar/scoring/normalization.py
|
VarunSrivastava19/VBDiarization
|
2a460b4fc11b3a5ff73d0534cadb182be1a9d882
|
[
"Apache-2.0"
] | 27
|
2017-07-20T06:10:42.000Z
|
2020-11-22T14:15:16.000Z
|
vbdiar/scoring/normalization.py
|
VarunSrivastava19/VBDiarization
|
2a460b4fc11b3a5ff73d0534cadb182be1a9d882
|
[
"Apache-2.0"
] | 30
|
2017-07-17T08:53:44.000Z
|
2021-05-18T07:37:46.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Brno University of Technology FIT
# Author: Jan Profant <jan.profant@phonexia.com>
# All Rights Reserved
import os
import logging
import pickle
import multiprocessing
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from vbdiar.features.segments import get_frames_from_time
from vbdiar.embeddings.embedding import extract_embeddings
from vbdiar.utils import mkdir_p
from vbdiar.utils.utils import Utils
logger = logging.getLogger(__name__)
def process_files(fns, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1):
"""
Args:
fns:
speakers_dict:
features_extractor:
embedding_extractor:
audio_dir:
wav_suffix:
in_rttm_dir:
rttm_suffix:
min_length:
n_jobs:
Returns:
"""
kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor,
embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix,
in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length)
if n_jobs == 1:
ret = _process_files((fns, kwargs))
else:
pool = multiprocessing.Pool(n_jobs)
ret = pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns, n_jobs)))
return ret
def _process_files(dargs):
"""
Args:
dargs:
Returns:
"""
fns, kwargs = dargs
ret = []
for fn in fns:
ret.append(process_file(file_name=fn, **kwargs))
return ret
def process_file(file_name, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length):
""" Extract embeddings for all defined speakers.
Args:
file_name (string_types): path to input audio file
speakers_dict (dict): dictionary containing all embedding across speakers
features_extractor (Any):
embedding_extractor (Any):
audio_dir (string_types):
wav_suffix (string_types):
in_rttm_dir (string_types):
rttm_suffix (string_types):
min_length (float):
Returns:
dict: updated dictionary with speakers
"""
logger.info('Processing file `{}`.'.format(file_name.split()[0]))
# extract features from whole audio
features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix)))
# process utterances of the speakers
features_dict = {}
with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f:
for line in f:
start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000)
speaker = line.split()[7]
if dur > min_length:
end_time = start_time + dur
start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time))
if speaker not in features_dict:
features_dict[speaker] = {}
assert 0 <= start < end, \
f'Incorrect timing for extracting features, start: {start}, size: {features.shape[0]}, end: {end}.'
if end >= features.shape[0]:
end = features.shape[0] - 1
features_dict[speaker][(start_time, end_time)] = features[start:end]
for speaker in features_dict:
embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor)
embeddings_long = embedding_set.get_all_embeddings()
if speaker not in speakers_dict.keys():
speakers_dict[speaker] = embeddings_long
else:
speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0)
return speakers_dict
class Normalization(object):
""" Speaker normalization S-Norm. """
embeddings = None
in_emb_dir = None
def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None,
out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None,
plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1):
""" Initialize normalization object.
Args:
norm_list (string_types): path to normalization list
audio_dir (string_types|None): path to audio directory
in_rttm_dir (string_types|None): path to directory with rttm files
in_emb_dir (str|None): path to directory with i-vectors
out_emb_dir (str|None): path to directory for storing embeddings
min_length (int): minimal length for extracting embeddings
features_extractor (Any): object for feature extraction
embedding_extractor (Any): object for extracting embedding
plda (PLDA|None): plda model object
wav_suffix (string_types): suffix of wav files
rttm_suffix (string_types): suffix of rttm files
"""
if audio_dir:
self.audio_dir = os.path.abspath(audio_dir)
self.norm_list = norm_list
if in_rttm_dir:
self.in_rttm_dir = os.path.abspath(in_rttm_dir)
else:
raise ValueError('It is required to have input rttm files for normalization.')
self.features_extractor = features_extractor
self.embedding_extractor = embedding_extractor
self.plda = plda
self.wav_suffix = wav_suffix
self.rttm_suffix = rttm_suffix
if in_emb_dir:
self.in_emb_dir = os.path.abspath(in_emb_dir)
if out_emb_dir:
self.out_emb_dir = os.path.abspath(out_emb_dir)
self.min_length = min_length
self.n_jobs = n_jobs
if self.in_emb_dir is None:
self.embeddings = self.extract_embeddings()
else:
self.embeddings = self.load_embeddings()
self.mean = np.mean(self.embeddings, axis=0)
def __iter__(self):
current = 0
while current < len(self.embeddings):
yield self.embeddings[current]
current += 1
def __getitem__(self, key):
return self.embeddings[key]
def __setitem__(self, key, value):
self.embeddings[key] = value
def __len__(self):
return len(self.embeddings)
def extract_embeddings(self):
""" Extract normalization embeddings using averaging.
Returns:
Tuple[np.array, np.array]: vectors for individual speakers, global mean over all speakers
"""
speakers_dict, fns = {}, []
with open(self.norm_list) as f:
for line in f:
if len(line.split()) > 1: # number of speakers is defined
line = line.split()[0]
else:
line = line.replace(os.linesep, '')
fns.append(line)
speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor,
embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir,
wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir,
rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs)
assert len(speakers_dict) == len(fns)
# all are the same
merged_speakers_dict = speakers_dict[0]
if self.out_emb_dir:
for speaker in merged_speakers_dict:
out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl')
mkdir_p(os.path.dirname(out_path))
with open(out_path, 'wb') as f:
pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL)
for speaker in merged_speakers_dict:
merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0)
return np.array(list(merged_speakers_dict.values()))
def load_embeddings(self):
""" Load normalization embeddings from pickle files.
Returns:
np.array: embeddings per speaker
"""
embeddings, speakers = [], set()
with open(self.norm_list) as f:
for file_name in f:
if len(file_name.split()) > 1: # number of speakers is defined
file_name = file_name.split()[0]
else:
file_name = file_name.replace(os.linesep, '')
with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp:
for line in fp:
speakers.add(line.split()[7])
logger.info('Loading pickled normalization embeddings from `{}`.'.format(self.in_emb_dir))
for speaker in speakers:
embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker))
if os.path.isfile(embedding_path):
logger.info('Loading normalization pickle file `{}`.'.format(speaker))
with open(embedding_path, 'rb') as f:
# append mean from speaker's embeddings
speaker_embeddings = pickle.load(f)
embeddings.append(np.mean(speaker_embeddings, axis=0))
else:
logger.warning('No pickle file found for `{}` in `{}`.'.format(speaker, self.in_emb_dir))
return np.array(embeddings)
def s_norm(self, test, enroll):
""" Run speaker normalization (S-Norm) on cached embeddings.
Args:
test (np.array): test embedding
enroll (np.array): enroll embedding
Returns:
float: hypothesis
"""
if self.plda:
a = self.plda.score(test, self.embeddings).T
b = self.plda.score(enroll, self.embeddings).T
c = self.plda.score(enroll, test).T
else:
a = cosine_similarity(test, self.embeddings).T
b = cosine_similarity(enroll, self.embeddings).T
c = cosine_similarity(enroll, test).T
scores = []
for ii in range(test.shape[0]):
test_scores = []
for jj in range(enroll.shape[0]):
test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii])
enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj])
s = c[ii][jj]
test_scores.append((((s - test_mean) / test_std + (s - enroll_mean) / enroll_std) / 2))
scores.append(test_scores)
return np.array(scores)
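# --- Editor's addendum (hedged sketch, not in the original file) ---
# The S-Norm arithmetic from s_norm() on plain numpy arrays, without the
# file/PLDA plumbing. Cohort, test and enroll vectors are random placeholders.
def _s_norm_demo(dim=8, cohort_size=32, seed=0):
    rng = np.random.RandomState(seed)
    cohort = rng.rand(cohort_size, dim)
    test = rng.rand(1, dim)
    enroll = rng.rand(1, dim)
    a = cosine_similarity(test, cohort).ravel()    # test vs. normalization cohort
    b = cosine_similarity(enroll, cohort).ravel()  # enroll vs. normalization cohort
    s = cosine_similarity(enroll, test)[0, 0]      # raw trial score
    return ((s - a.mean()) / a.std() + (s - b.mean()) / b.std()) / 2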
| 39.10219
| 119
| 0.614243
| 1,326
| 10,714
| 4.729261
| 0.165158
| 0.045926
| 0.021528
| 0.023122
| 0.215915
| 0.114176
| 0.080689
| 0.054856
| 0.046564
| 0.046564
| 0
| 0.005383
| 0.289061
| 10,714
| 273
| 120
| 39.245421
| 0.817907
| 0.200299
| 0
| 0.1
| 0
| 0.00625
| 0.048184
| 0.006115
| 0
| 0
| 0
| 0
| 0.0125
| 1
| 0.06875
| false
| 0
| 0.0625
| 0.0125
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbf1c54ca3fd34dfbf7ce18d8d98a14afb9379e4
| 1,056
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/virtual_wan_security_providers.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/virtual_wan_security_providers.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/virtual_wan_security_providers.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualWanSecurityProviders(Model):
"""Collection of SecurityProviders.
:param supported_providers:
:type supported_providers:
list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider]
"""
_attribute_map = {
'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'},
}
def __init__(self, **kwargs):
super(VirtualWanSecurityProviders, self).__init__(**kwargs)
self.supported_providers = kwargs.get('supported_providers', None)
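# --- Editor's addendum (hedged sketch, not in the original file) ---
# Constructing the generated model. The element type would normally be
# VirtualWanSecurityProvider instances from the same models package; an
# empty list stands in here.
providers = VirtualWanSecurityProviders(supported_providers=[])
assert providers.supported_providers == []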
| 35.2
| 101
| 0.630682
| 98
| 1,056
| 6.622449
| 0.755102
| 0.138675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009956
| 0.143939
| 1,056
| 29
| 102
| 36.413793
| 0.707965
| 0.582386
| 0
| 0
| 0
| 0
| 0.222494
| 0.06846
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbf2e984865e076aaf055509509eac8230a5a7d1
| 438
|
py
|
Python
|
jsonresume_theme_stackoverflow/filters.py
|
flowgunso/jsonresume-theme-stackoverflow
|
5fcadcf41a93478a09e95d79fd62d8ac3402b33b
|
[
"MIT"
] | null | null | null |
jsonresume_theme_stackoverflow/filters.py
|
flowgunso/jsonresume-theme-stackoverflow
|
5fcadcf41a93478a09e95d79fd62d8ac3402b33b
|
[
"MIT"
] | 4
|
2020-12-29T14:04:48.000Z
|
2021-01-01T20:23:37.000Z
|
jsonresume_theme_stackoverflow/filters.py
|
flowgunso/jsonresume-theme-stackoverflow
|
5fcadcf41a93478a09e95d79fd62d8ac3402b33b
|
[
"MIT"
] | null | null | null |
import datetime
import re
from .exceptions import ObjectIsNotADate
def format_date(value, format="%d %m %Y"):
    # NOTE: the original default was "%d %M %Y"; strftime's %M is *minutes*
    # (always "00" when formatting a date), so %m (zero-padded month) is
    # almost certainly what was intended.
regex = re.match(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})", value)
if regex is not None:
date = datetime.date(
int(regex.group("year")),
int(regex.group("month")),
int(regex.group("day")))
else:
raise ObjectIsNotADate
return date.strftime(format)
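# --- Editor's addendum (hedged sketch, not in the original file) ---
# The happy path and the failure path; the date value is illustrative.
def _format_date_demo():
    assert format_date("2020-12-29", format="%d %m %Y") == "29 12 2020"
    try:
        format_date("not-a-date")
    except ObjectIsNotADate:
        return "raised as expected"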
| 24.333333
| 79
| 0.586758
| 60
| 438
| 4.266667
| 0.533333
| 0.09375
| 0.152344
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009009
| 0.239726
| 438
| 17
| 80
| 25.764706
| 0.75976
| 0
| 0
| 0
| 0
| 0.076923
| 0.152968
| 0.107306
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbf349d5a69e925a415de30492c1747e358368f6
| 3,966
|
py
|
Python
|
ipec/data/core.py
|
wwwbbb8510/ippso
|
fa20d23cd8edba5908e65a0ab0ab990d7ce3d5d5
|
[
"MIT"
] | 9
|
2018-05-10T01:04:34.000Z
|
2019-06-28T07:47:37.000Z
|
ipec/data/core.py
|
wwwbbb8510/ippso
|
fa20d23cd8edba5908e65a0ab0ab990d7ce3d5d5
|
[
"MIT"
] | null | null | null |
ipec/data/core.py
|
wwwbbb8510/ippso
|
fa20d23cd8edba5908e65a0ab0ab990d7ce3d5d5
|
[
"MIT"
] | 2
|
2020-10-12T03:54:30.000Z
|
2021-09-08T14:10:21.000Z
|
import numpy as np
import os
import logging
from sklearn.model_selection import train_test_split
DATASET_ROOT_FOLDER = os.path.abspath('datasets')
class DataLoader:
train = None
validation = None
test = None
mode = None
partial_dataset = None
@staticmethod
def load(train_path=None, validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000):
if train_path is not None:
DataLoader.train = DataLoader.load_image_data_with_label_at_end(
os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length)
if validation_path is not None:
DataLoader.validation = DataLoader.load_image_data_with_label_at_end(
os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length)
elif train_validation_split_point is not None and train_validation_split_point > 0:
if DataLoader.mode is None or DataLoader.partial_dataset is not None:
train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8)
splited_train = {
'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :],
'labels': DataLoader.train['labels'][0:train_validation_split_point]
}
splited_validation = {
'images': DataLoader.train['images'][train_validation_split_point:, :, :, :],
'labels': DataLoader.train['labels'][train_validation_split_point:]
}
DataLoader.train = splited_train
DataLoader.validation = splited_validation
if test_path is not None:
DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length)
logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape)))
logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape)))
logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape)))
return DataLoader
    @staticmethod
    def get_training_data():
        """
        get training data
        :return: dict of (images, labels)
        :rtype: dict
        """
        # DataLoader.train is a dict (see load()), so it must be indexed; the
        # original attribute access (DataLoader.train.images) would raise
        # AttributeError. The same fix applies to the two getters below.
        return {
            'images': DataLoader.train['images'],
            'labels': DataLoader.train['labels']
        }
    @staticmethod
    def get_validation_data():
        """
        get validation data
        :return: dict of (images, labels)
        :rtype: dict
        """
        return {
            'images': DataLoader.validation['images'],
            'labels': DataLoader.validation['labels']
        }
    @staticmethod
    def get_test_data():
        """
        get test data
        :return: dict of (images, labels)
        :rtype: dict
        """
        return {
            'images': DataLoader.test['images'],
            'labels': DataLoader.test['labels']
        }
@staticmethod
def load_image_data_with_label_at_end(path, height, length):
data = np.loadtxt(path)
        if DataLoader.mode is None:
            data = data[0:1000, :]
        elif DataLoader.partial_dataset is not None and 0 < DataLoader.partial_dataset < 1:
            # randomly pick a partial dataset
            cut_point = int(data.shape[0] * DataLoader.partial_dataset)
            indices = np.random.permutation(data.shape[0])
            training_idx = indices[:cut_point]
            data = data[training_idx, :]
images = data[:, 0:-1]
labels = data[:, -1]
images = np.reshape(images, [images.shape[0], height, length, 1], order='F')
return {
'images': images,
'labels': labels
}
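# --- Editor's addendum (hedged usage sketch, not in the original file) ---
# Loading a whitespace-separated dump where each row is height*length pixel
# values followed by a label. The file names are placeholders relative to
# DATASET_ROOT_FOLDER.
def _demo_loader():
    loader = DataLoader.load(train_path='mnist/train.txt',
                             test_path='mnist/test.txt',
                             height=28, length=28)
    return loader.get_training_data()  # {'images': ..., 'labels': ...}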
| 36.054545
| 150
| 0.614977
| 439
| 3,966
| 5.355353
| 0.170843
| 0.070183
| 0.068056
| 0.08507
| 0.444492
| 0.300723
| 0.27265
| 0.261165
| 0.195236
| 0.149298
| 0
| 0.010208
| 0.283661
| 3,966
| 109
| 151
| 36.385321
| 0.817318
| 0.056228
| 0
| 0.21519
| 0
| 0
| 0.053492
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063291
| false
| 0
| 0.050633
| 0
| 0.253165
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbf566f5e271a38bb7effb6c5cb9d1b3bcf1fdab
| 22,131
|
py
|
Python
|
test/python/quantum_info/operators/test_operator.py
|
EnriqueL8/qiskit-terra
|
08b801f1f8598c4e44680b4a75c232ed92db0262
|
[
"Apache-2.0"
] | 2
|
2019-06-28T19:58:42.000Z
|
2019-07-26T05:04:02.000Z
|
test/python/quantum_info/operators/test_operator.py
|
EnriqueL8/qiskit-terra
|
08b801f1f8598c4e44680b4a75c232ed92db0262
|
[
"Apache-2.0"
] | null | null | null |
test/python/quantum_info/operators/test_operator.py
|
EnriqueL8/qiskit-terra
|
08b801f1f8598c4e44680b4a75c232ed92db0262
|
[
"Apache-2.0"
] | 1
|
2020-01-24T21:01:06.000Z
|
2020-01-24T21:01:06.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for Operator matrix linear operator class."""
import unittest
import logging
import copy
import numpy as np
from numpy.testing import assert_allclose
import scipy.linalg as la
from qiskit import QiskitError
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.extensions.standard import HGate, CHGate, CXGate
from qiskit.test import QiskitTestCase
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.predicates import matrix_equal
logger = logging.getLogger(__name__)
class OperatorTestCase(QiskitTestCase):
"""Test utils for Operator"""
# Pauli-matrix unitaries
UI = np.eye(2)
UX = np.array([[0, 1], [1, 0]])
UY = np.array([[0, -1j], [1j, 0]])
UZ = np.diag([1, -1])
UH = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
@classmethod
def rand_rho(cls, n):
"""Return random density matrix"""
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger.debug("rand_rho RandomState seeded with seed=%s", seed)
rng = np.random.RandomState(seed)
psi = rng.rand(n) + 1j * rng.rand(n)
rho = np.outer(psi, psi.conj())
rho /= np.trace(rho)
return rho
@classmethod
def rand_matrix(cls, rows, cols=None, real=False):
"""Return a random matrix."""
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger.debug("rand_matrix RandomState seeded with seed=%s", seed)
rng = np.random.RandomState(seed)
if cols is None:
cols = rows
if real:
return rng.rand(rows, cols)
return rng.rand(rows, cols) + 1j * rng.rand(rows, cols)
def simple_circuit_no_measure(self):
"""Return a unitary circuit and the corresponding unitary array."""
qr = QuantumRegister(3)
circ = QuantumCircuit(qr)
circ.h(qr[0])
circ.x(qr[1])
circ.ry(np.pi / 2, qr[2])
y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])
target = Operator(np.kron(y90, np.kron(self.UX, self.UH)))
return circ, target
def simple_circuit_with_measure(self):
"""Return a unitary circuit with measurement."""
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
circ = QuantumCircuit(qr, cr)
circ.h(qr[0])
circ.x(qr[1])
circ.measure(qr, cr)
return circ
class TestOperator(OperatorTestCase):
"""Tests for Operator linear operator class."""
def test_init_array_qubit(self):
"""Test subsystem initialization from N-qubit array."""
# Test automatic inference of qubit subsystems
mat = self.rand_matrix(8, 8)
op = Operator(mat)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (8, 8))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(op.output_dims(), (2, 2, 2))
op = Operator(mat, input_dims=8, output_dims=8)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (8, 8))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(op.output_dims(), (2, 2, 2))
def test_init_array(self):
"""Test initialization from array."""
mat = np.eye(3)
op = Operator(mat)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (3, 3))
self.assertEqual(op.input_dims(), (3,))
self.assertEqual(op.output_dims(), (3,))
mat = self.rand_matrix(2 * 3 * 4, 4 * 5)
op = Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4])
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (4 * 5, 2 * 3 * 4))
self.assertEqual(op.input_dims(), (4, 5))
self.assertEqual(op.output_dims(), (2, 3, 4))
def test_init_array_except(self):
"""Test initialization exception from array."""
mat = self.rand_matrix(4, 4)
self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2])
self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4])
self.assertRaises(QiskitError, Operator, mat, input_dims=5)
def test_init_operator(self):
"""Test initialization from Operator."""
op1 = Operator(self.rand_matrix(4, 4))
op2 = Operator(op1)
self.assertEqual(op1, op2)
def test_circuit_init(self):
"""Test initialization from a circuit."""
# Test tensor product of 1-qubit gates
circuit = QuantumCircuit(3)
circuit.h(0)
circuit.x(1)
circuit.ry(np.pi / 2, 2)
op = Operator(circuit)
y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])
target = np.kron(y90, np.kron(self.UX, self.UH))
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
# Test decomposition of Controlled-u1 gate
lam = np.pi / 4
circuit = QuantumCircuit(2)
circuit.cu1(lam, 0, 1)
op = Operator(circuit)
target = np.diag([1, 1, 1, np.exp(1j * lam)])
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
# Test decomposition of controlled-H gate
circuit = QuantumCircuit(2)
circuit.ch(0, 1)
op = Operator(circuit)
target = np.kron(self.UI, np.diag([1, 0])) + np.kron(
self.UH, np.diag([0, 1]))
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
def test_instruction_init(self):
"""Test initialization from a circuit."""
gate = CXGate()
op = Operator(gate).data
target = gate.to_matrix()
global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
gate = CHGate()
op = Operator(gate).data
had = HGate().to_matrix()
target = np.kron(had, np.diag([0, 1])) + np.kron(
np.eye(2), np.diag([1, 0]))
global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
def test_circuit_init_except(self):
"""Test initialization from circuit with measure raises exception."""
circuit = self.simple_circuit_with_measure()
self.assertRaises(QiskitError, Operator, circuit)
def test_equal(self):
"""Test __eq__ method"""
mat = self.rand_matrix(2, 2, real=True)
self.assertEqual(Operator(np.array(mat, dtype=complex)),
Operator(mat))
mat = self.rand_matrix(4, 4)
self.assertEqual(Operator(mat.tolist()),
Operator(mat))
def test_data(self):
"""Test Operator representation string property."""
mat = self.rand_matrix(2, 2)
op = Operator(mat)
assert_allclose(mat, op.data)
def test_dim(self):
"""Test Operator dim property."""
mat = self.rand_matrix(4, 4)
self.assertEqual(Operator(mat).dim, (4, 4))
self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4))
self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4))
def test_input_dims(self):
"""Test Operator input_dims method."""
op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),
input_dims=[4, 5], output_dims=[2, 3, 4])
self.assertEqual(op.input_dims(), (4, 5))
self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5))
self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4))
self.assertEqual(op.input_dims(qargs=[0]), (4,))
self.assertEqual(op.input_dims(qargs=[1]), (5,))
def test_output_dims(self):
"""Test Operator output_dims method."""
op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),
input_dims=[4, 5], output_dims=[2, 3, 4])
self.assertEqual(op.output_dims(), (2, 3, 4))
self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4))
self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2))
self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3))
self.assertEqual(op.output_dims(qargs=[0]), (2,))
self.assertEqual(op.output_dims(qargs=[1]), (3,))
self.assertEqual(op.output_dims(qargs=[2]), (4,))
self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4))
self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2))
def test_reshape(self):
"""Test Operator reshape method."""
op = Operator(self.rand_matrix(8, 8))
reshaped1 = op.reshape(input_dims=[8], output_dims=[8])
reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4])
self.assertEqual(op.output_dims(), (2, 2, 2))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(reshaped1.output_dims(), (8,))
self.assertEqual(reshaped1.input_dims(), (8,))
self.assertEqual(reshaped2.output_dims(), (2, 4))
self.assertEqual(reshaped2.input_dims(), (4, 2))
def test_copy(self):
"""Test Operator copy method"""
mat = np.eye(2)
with self.subTest("Deep copy"):
orig = Operator(mat)
cpy = orig.copy()
cpy._data[0, 0] = 0.0
self.assertFalse(cpy == orig)
with self.subTest("Shallow copy"):
orig = Operator(mat)
clone = copy.copy(orig)
clone._data[0, 0] = 0.0
self.assertTrue(clone == orig)
def test_is_unitary(self):
"""Test is_unitary method."""
# X-90 rotation
X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)
self.assertTrue(Operator(X90).is_unitary())
# Non-unitary should return false
self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary())
def test_to_operator(self):
"""Test to_operator method."""
op1 = Operator(self.rand_matrix(4, 4))
op2 = op1.to_operator()
self.assertEqual(op1, op2)
def test_conjugate(self):
"""Test conjugate method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_conj = op.conjugate()
self.assertEqual(uni_conj, Operator(matr - 1j * mati))
def test_transpose(self):
"""Test transpose method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_t = op.transpose()
self.assertEqual(uni_t, Operator(matr.T + 1j * mati.T))
def test_adjoint(self):
"""Test adjoint method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_adj = op.adjoint()
self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T))
def test_compose_except(self):
"""Test compose different dimension exception"""
self.assertRaises(QiskitError,
Operator(np.eye(2)).compose,
Operator(np.eye(3)))
self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2)
def test_compose(self):
"""Test compose method."""
op1 = Operator(self.UX)
op2 = Operator(self.UY)
targ = Operator(np.dot(self.UY, self.UX))
self.assertEqual(op1.compose(op2), targ)
self.assertEqual(op1 @ op2, targ)
targ = Operator(np.dot(self.UX, self.UY))
self.assertEqual(op2.compose(op1), targ)
self.assertEqual(op2 @ op1, targ)
def test_dot(self):
"""Test dot method."""
op1 = Operator(self.UY)
op2 = Operator(self.UX)
targ = Operator(np.dot(self.UY, self.UX))
self.assertEqual(op1.dot(op2), targ)
self.assertEqual(op1 * op2, targ)
targ = Operator(np.dot(self.UX, self.UY))
self.assertEqual(op2.dot(op1), targ)
self.assertEqual(op2 * op1, targ)
def test_compose_front(self):
"""Test front compose method."""
opYX = Operator(self.UY).compose(Operator(self.UX), front=True)
matYX = np.dot(self.UY, self.UX)
self.assertEqual(opYX, Operator(matYX))
opXY = Operator(self.UX).compose(Operator(self.UY), front=True)
matXY = np.dot(self.UX, self.UY)
self.assertEqual(opXY, Operator(matXY))
def test_compose_subsystem(self):
"""Test subsystem compose method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat)
self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ))
self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ))
self.assertEqual(op @ op3([0, 1, 2]), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat)
self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ))
self.assertEqual(op @ op3([2, 1, 0]), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat)
self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ))
self.assertEqual(op @ op2([0, 1]), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat)
self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ))
self.assertEqual(op @ op2([2, 0]), Operator(targ))
# op1 qargs=[0]
targ = np.dot(np.kron(np.eye(4), mat_a), mat)
self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ))
self.assertEqual(op @ op1([0]), Operator(targ))
# op1 qargs=[1]
targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat)
self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ))
self.assertEqual(op @ op1([1]), Operator(targ))
# op1 qargs=[2]
targ = np.dot(np.kron(mat_a, np.eye(4)), mat)
self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ))
self.assertEqual(op @ op1([2]), Operator(targ))
def test_dot_subsystem(self):
"""Test subsystem dot method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))
self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ))
self.assertEqual(op * op3([0, 1, 2]), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))
self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ))
self.assertEqual(op * op3([2, 1, 0]), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))
self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ))
self.assertEqual(op * op2([0, 1]), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))
self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ))
self.assertEqual(op * op2([2, 0]), Operator(targ))
# op1 qargs=[0]
targ = np.dot(mat, np.kron(np.eye(4), mat_a))
self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ))
self.assertEqual(op * op1([0]), Operator(targ))
# op1 qargs=[1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))
self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ))
self.assertEqual(op * op1([1]), Operator(targ))
# op1 qargs=[2]
targ = np.dot(mat, np.kron(mat_a, np.eye(4)))
self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ))
self.assertEqual(op * op1([2]), Operator(targ))
def test_compose_front_subsystem(self):
"""Test subsystem front compose method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))
self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))
self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))
self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))
self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ))
# op1 qargs=[0]
targ = np.dot(mat, np.kron(np.eye(4), mat_a))
self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ))
# op1 qargs=[1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))
self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ))
# op1 qargs=[2]
targ = np.dot(mat, np.kron(mat_a, np.eye(4)))
self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ))
def test_power(self):
"""Test power method."""
X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)
op = Operator(X90)
self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]]))
self.assertEqual(op.power(4), Operator(-1 * np.eye(2)))
self.assertEqual(op.power(8), Operator(np.eye(2)))
def test_expand(self):
"""Test expand method."""
mat1 = self.UX
mat2 = np.eye(3, dtype=complex)
mat21 = np.kron(mat2, mat1)
op21 = Operator(mat1).expand(Operator(mat2))
self.assertEqual(op21.dim, (6, 6))
assert_allclose(op21.data, Operator(mat21).data)
mat12 = np.kron(mat1, mat2)
op12 = Operator(mat2).expand(Operator(mat1))
self.assertEqual(op12.dim, (6, 6))
assert_allclose(op12.data, Operator(mat12).data)
def test_tensor(self):
"""Test tensor method."""
mat1 = self.UX
mat2 = np.eye(3, dtype=complex)
mat21 = np.kron(mat2, mat1)
op21 = Operator(mat2).tensor(Operator(mat1))
self.assertEqual(op21.dim, (6, 6))
assert_allclose(op21.data, Operator(mat21).data)
mat12 = np.kron(mat1, mat2)
op12 = Operator(mat1).tensor(Operator(mat2))
self.assertEqual(op12.dim, (6, 6))
assert_allclose(op12.data, Operator(mat12).data)
def test_power_except(self):
"""Test power method raises exceptions."""
op = Operator(self.rand_matrix(3, 3))
# Non-integer power raises error
self.assertRaises(QiskitError, op.power, 0.5)
def test_add(self):
"""Test add method."""
mat1 = self.rand_matrix(4, 4)
mat2 = self.rand_matrix(4, 4)
op1 = Operator(mat1)
op2 = Operator(mat2)
self.assertEqual(op1._add(op2), Operator(mat1 + mat2))
self.assertEqual(op1 + op2, Operator(mat1 + mat2))
self.assertEqual(op1 - op2, Operator(mat1 - mat2))
def test_add_except(self):
"""Test add method raises exceptions."""
op1 = Operator(self.rand_matrix(2, 2))
op2 = Operator(self.rand_matrix(3, 3))
self.assertRaises(QiskitError, op1._add, op2)
def test_multiply(self):
"""Test multiply method."""
mat = self.rand_matrix(4, 4)
val = np.exp(5j)
op = Operator(mat)
self.assertEqual(op._multiply(val), Operator(val * mat))
self.assertEqual(val * op, Operator(val * mat))
def test_multiply_except(self):
"""Test multiply method raises exceptions."""
op = Operator(self.rand_matrix(2, 2))
self.assertRaises(QiskitError, op._multiply, 's')
self.assertRaises(QiskitError, op.__rmul__, 's')
self.assertRaises(QiskitError, op._multiply, op)
self.assertRaises(QiskitError, op.__rmul__, op)
def test_negate(self):
"""Test negate method"""
mat = self.rand_matrix(4, 4)
op = Operator(mat)
self.assertEqual(-op, Operator(-1 * mat))
def test_equiv(self):
"""Test negate method"""
mat = np.diag([1, np.exp(1j * np.pi / 2)])
phase = np.exp(-1j * np.pi / 4)
op = Operator(mat)
self.assertTrue(op.equiv(phase * mat))
self.assertTrue(op.equiv(Operator(phase * mat)))
self.assertFalse(op.equiv(2 * mat))
if __name__ == '__main__':
unittest.main()
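# --- Editor's addendum (hedged sketch, not in the original file) ---
# The composition-order convention the tests above rely on: compose applies
# the other operator *after* self, while dot applies it *before*.
def _compose_order_demo():
    X = Operator([[0, 1], [1, 0]])
    Y = Operator([[0, -1j], [1j, 0]])
    assert X.compose(Y) == Operator(np.dot(Y.data, X.data))
    assert X.dot(Y) == Operator(np.dot(X.data, Y.data))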
| 38.826316
| 90
| 0.592246
| 3,107
| 22,131
| 4.11812
| 0.087866
| 0.118406
| 0.091676
| 0.025791
| 0.641657
| 0.594764
| 0.552716
| 0.50934
| 0.446424
| 0.414224
| 0
| 0.042437
| 0.252542
| 22,131
| 569
| 91
| 38.894552
| 0.731048
| 0.114093
| 0
| 0.353383
| 0
| 0
| 0.005893
| 0
| 0
| 0
| 0
| 0
| 0.338346
| 1
| 0.100251
| false
| 0
| 0.030075
| 0
| 0.160401
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbf954bdb4324156034054e74ee082a9dc8b9157
| 6,151
|
py
|
Python
|
tests/test_helpers.py
|
ajdavis/aiohttp
|
d5138978f3e82aa82a2f003b00d38112c58a40c1
|
[
"Apache-2.0"
] | 1
|
2021-07-07T06:36:57.000Z
|
2021-07-07T06:36:57.000Z
|
tests/test_helpers.py
|
ajdavis/aiohttp
|
d5138978f3e82aa82a2f003b00d38112c58a40c1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_helpers.py
|
ajdavis/aiohttp
|
d5138978f3e82aa82a2f003b00d38112c58a40c1
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from unittest import mock
from aiohttp import helpers
import datetime
def test_parse_mimetype_1():
assert helpers.parse_mimetype('') == ('', '', '', {})
def test_parse_mimetype_2():
assert helpers.parse_mimetype('*') == ('*', '*', '', {})
def test_parse_mimetype_3():
assert (helpers.parse_mimetype('application/json') ==
('application', 'json', '', {}))
def test_parse_mimetype_4():
assert (
helpers.parse_mimetype('application/json; charset=utf-8') ==
('application', 'json', '', {'charset': 'utf-8'}))
def test_parse_mimetype_5():
assert (
helpers.parse_mimetype('''application/json; charset=utf-8;''') ==
('application', 'json', '', {'charset': 'utf-8'}))
def test_parse_mimetype_6():
assert(
helpers.parse_mimetype('ApPlIcAtIoN/JSON;ChaRseT="UTF-8"') ==
('application', 'json', '', {'charset': 'UTF-8'}))
def test_parse_mimetype_7():
assert (
helpers.parse_mimetype('application/rss+xml') ==
('application', 'rss', 'xml', {}))
def test_parse_mimetype_8():
assert (
helpers.parse_mimetype('text/plain;base64') ==
('text', 'plain', '', {'base64': ''}))
def test_basic_auth1():
# missing password here
with pytest.raises(ValueError):
helpers.BasicAuth(None)
def test_basic_auth2():
with pytest.raises(ValueError):
helpers.BasicAuth('nkim', None)
def test_basic_auth3():
auth = helpers.BasicAuth('nkim')
assert auth.login == 'nkim'
assert auth.password == ''
def test_basic_auth4():
auth = helpers.BasicAuth('nkim', 'pwd')
assert auth.login == 'nkim'
assert auth.password == 'pwd'
assert auth.encode() == 'Basic bmtpbTpwd2Q='
def test_invalid_formdata_params():
with pytest.raises(TypeError):
helpers.FormData('asdasf')
def test_invalid_formdata_params2():
with pytest.raises(TypeError):
helpers.FormData('as') # a plain str is not a valid fields container
def test_invalid_formdata_content_type():
form = helpers.FormData()
invalid_vals = [0, 0.1, {}, [], b'foo']
for invalid_val in invalid_vals:
with pytest.raises(TypeError):
form.add_field('foo', 'bar', content_type=invalid_val)
def test_invalid_formdata_filename():
form = helpers.FormData()
invalid_vals = [0, 0.1, {}, [], b'foo']
for invalid_val in invalid_vals:
with pytest.raises(TypeError):
form.add_field('foo', 'bar', filename=invalid_val)
def test_invalid_formdata_content_transfer_encoding():
form = helpers.FormData()
invalid_vals = [0, 0.1, {}, [], b'foo']
for invalid_val in invalid_vals:
with pytest.raises(TypeError):
form.add_field('foo',
'bar',
content_transfer_encoding=invalid_val)
def test_access_logger_format():
log_format = '%T {%{SPAM}e} "%{ETag}o" %X {X} %%P'
mock_logger = mock.Mock()
access_logger = helpers.AccessLogger(mock_logger, log_format)
expected = '%s {%s} "%s" %%X {X} %%%s'
assert expected == access_logger._log_format
@mock.patch("aiohttp.helpers.datetime")
@mock.patch("os.getpid")
def test_access_logger_atoms(mock_getpid, mock_datetime):
utcnow = datetime.datetime(1843, 1, 1, 0, 0)
mock_datetime.datetime.utcnow.return_value = utcnow
mock_getpid.return_value = 42
log_format = '%a %t %P %l %u %r %s %b %O %T %Tf %D'
mock_logger = mock.Mock()
access_logger = helpers.AccessLogger(mock_logger, log_format)
message = mock.Mock(headers={}, method="GET", path="/path", version=(1, 1))
environ = {}
response = mock.Mock(headers={}, output_length=123,
body_length=42, status=200)
transport = mock.Mock()
transport.get_extra_info.return_value = ("127.0.0.2", 1234)
access_logger.log(message, environ, response, transport, 3.1415926)
assert not mock_logger.exception.called
expected = ('127.0.0.2 [01/Jan/1843:00:00:00 +0000] <42> - - '
'GET /path HTTP/1.1 200 42 123 3 3.141593 3141593')
mock_logger.info.assert_called_with(expected)
def test_access_logger_dicts():
log_format = '%{User-Agent}i %{Content-Length}o %{SPAM}e %{None}i'
mock_logger = mock.Mock()
access_logger = helpers.AccessLogger(mock_logger, log_format)
message = mock.Mock(headers={"USER-AGENT": "Mock/1.0"}, version=(1, 1))
environ = {"SPAM": "EGGS"}
response = mock.Mock(headers={"CONTENT-LENGTH": 123})
transport = mock.Mock()
transport.get_extra_info.return_value = ("127.0.0.2", 1234)
access_logger.log(message, environ, response, transport, 0.0)
assert not mock_logger.error.called
expected = 'Mock/1.0 123 EGGS -'
mock_logger.info.assert_called_with(expected)
def test_logger_no_message_and_environ():
mock_logger = mock.Mock()
mock_transport = mock.Mock()
mock_transport.get_extra_info.return_value = ("127.0.0.3", 0)
access_logger = helpers.AccessLogger(mock_logger, "%r %{FOOBAR}e")
access_logger.log(None, None, None, mock_transport, 0.0)
mock_logger.info.assert_called_with("- -")
def test_reify():
class A:
@helpers.reify
def prop(self):
return 1
a = A()
assert 1 == a.prop
def test_reify_class():
class A:
@helpers.reify
def prop(self):
"""Docstring."""
return 1
assert isinstance(A.prop, helpers.reify)
assert 'Docstring.' == A.prop.__doc__
def test_reify_assignment():
class A:
@helpers.reify
def prop(self):
return 1
a = A()
with pytest.raises(AttributeError):
a.prop = 123
def test_requote_uri_with_unquoted_percents():
# Ensure we handle unquoted percent signs in redirects.
bad_uri = 'http://example.com/fiz?buz=%ppicture'
quoted = 'http://example.com/fiz?buz=%25ppicture'
assert quoted == helpers.requote_uri(bad_uri)
def test_requote_uri_properly_requotes():
# Ensure requoting doesn't break expectations.
quoted = 'http://example.com/fiz?buz=%25ppicture'
assert quoted == helpers.requote_uri(quoted)
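The literal 'Basic bmtpbTpwd2Q=' asserted in test_basic_auth4 is just base64 of 'login:password'; a minimal sketch reproducing it without aiohttp:

import base64

# BasicAuth.encode() produces 'Basic ' + base64('login:password')
creds = base64.b64encode(b'nkim:pwd').decode('ascii')
assert 'Basic ' + creds == 'Basic bmtpbTpwd2Q='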
| 29.572115
| 79
| 0.643635
| 789
| 6,151
| 4.806084
| 0.224335
| 0.047996
| 0.025316
| 0.042194
| 0.541403
| 0.516878
| 0.427215
| 0.400053
| 0.375791
| 0.352057
| 0
| 0.033818
| 0.206796
| 6,151
| 207
| 80
| 29.714976
| 0.74339
| 0.025687
| 0
| 0.367347
| 0
| 0.013605
| 0.144719
| 0.012868
| 0
| 0
| 0
| 0
| 0.163265
| 1
| 0.197279
| false
| 0.013605
| 0.027211
| 0.013605
| 0.265306
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbf96ddd90a5808166e52168da5cadf7b4bb5c35
| 372
|
py
|
Python
|
GenConfigs.py
|
truls/faas-profiler
|
d54ca0d9926f38c693f616ba4d08414aea823f51
|
[
"MIT"
] | null | null | null |
GenConfigs.py
|
truls/faas-profiler
|
d54ca0d9926f38c693f616ba4d08414aea823f51
|
[
"MIT"
] | null | null | null |
GenConfigs.py
|
truls/faas-profiler
|
d54ca0d9926f38c693f616ba4d08414aea823f51
|
[
"MIT"
] | null | null | null |
from os.path import join
FAAS_ROOT="/lhome/trulsas/faas-profiler"
WORKLOAD_SPECS=join(FAAS_ROOT, "specs", "workloads")
#FAAS_ROOT="/home/truls/uni/phd/faas-profiler"
WSK_PATH = "wsk"
OPENWHISK_PATH = "/lhome/trulsas/openwhisk"
#: Location of output data
DATA_DIR = join(FAAS_ROOT, "..", "profiler_results")
SYSTEM_CPU_SET = "0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30"
| 28.615385
| 61
| 0.736559
| 64
| 372
| 4.109375
| 0.6875
| 0.121673
| 0.136882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079179
| 0.083333
| 372
| 12
| 62
| 31
| 0.692082
| 0.188172
| 0
| 0
| 0
| 0.142857
| 0.43
| 0.313333
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbfbafe90d8d62c542ce03ef8a862cdef8687b06
| 5,288
|
py
|
Python
|
radssh/hostkey.py
|
Eli-Tarrago/radssh
|
ebf3c8f17c3768268dcd483e899a590698de4452
|
[
"BSD-3-Clause"
] | 39
|
2015-05-11T15:06:58.000Z
|
2021-12-29T07:24:23.000Z
|
radssh/hostkey.py
|
Eli-Tarrago/radssh
|
ebf3c8f17c3768268dcd483e899a590698de4452
|
[
"BSD-3-Clause"
] | 45
|
2015-01-05T22:11:18.000Z
|
2021-06-02T03:57:49.000Z
|
radssh/hostkey.py
|
eorochena/radssh
|
b1d1ee5822036445f26a34147452df5c3142caee
|
[
"BSD-3-Clause"
] | 13
|
2015-05-05T12:42:09.000Z
|
2022-03-03T18:09:49.000Z
|
#
# Copyright (c) 2014, 2016, 2018, 2020 LexisNexis Risk Data Management Inc.
#
# This file is part of the RadSSH software package.
#
# RadSSH is free software, released under the Revised BSD License.
# You are permitted to use, modify, and redistribute this software
# according to the Revised BSD License, a copy of which should be
# included with the distribution as file LICENSE.txt
#
'''HostKey Handling Module'''
import os
import threading
import warnings
import paramiko.hostkeys
# Deprecated as of 1.1 - Use known_hosts rewrite instead if using this API
warnings.warn(FutureWarning('RadSSH hostkey module is no longer supported, and will be removed in release 2.0. Port existing code to use radssh.known_hosts instead.'))
class CodeMap(object):
'''CodeMap class'''
def __init__(self, **kwargs):
self._fwd = kwargs
self._reverse = {}
for k, v in kwargs.items():
self.__setattr__(k, v)
self._reverse[v] = k
def code(self, name):
'''Given a name, return the code'''
return self._fwd[name]
def name(self, code):
'''Given a code value, return the corresponding name'''
return self._reverse[code]
verify_mode = CodeMap(
# Different options for handling host key verification
# Listed in decreasing order of security/paranoia
reject=0, # Missing keys are rejected
prompt=1, # Missing keys may be accepted, based on user prompt
accept_new=2, # Missing keys automatically accepted
# After this point, key conflicts no longer hinder connections
# Using these options, you become vulnerable to spoofing and
# intercepted traffic for SSH sessions, and you don't care.
ignore=100, # Turn host key verification OFF
overwrite_blindly=666 # Concentrated evil
)
def printable_fingerprint(k):
'''Convert key fingerprint into OpenSSH printable format'''
fingerprint = k.get_fingerprint()
# Handle Python3 bytes or Python2 8-bit string style...
if isinstance(fingerprint[0], int):
seq = [int(x) for x in fingerprint]
else:
seq = [ord(x) for x in fingerprint]
return ':'.join(['%02x' % x for x in seq])
class HostKeyVerifier(object):
'''Class to control how (if) host keys are verified'''
def __init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'):
self.mode = verify_mode.code(mode)
self.hostkeys = paramiko.hostkeys.HostKeys()
self.lock = threading.Lock()
# Compare the translated numeric code, not the mode name string
if self.mode == verify_mode.ignore:
return
self.known_hosts_file = os.path.expanduser(known_hosts_file)
if os.path.exists(self.known_hosts_file):
self.hostkeys.load(self.known_hosts_file)
elif not os.path.exists(os.path.dirname(self.known_hosts_file)):
os.makedirs(os.path.dirname(self.known_hosts_file))
def verify_host_key(self, hostname, key):
'''Verify a single hostkey against a hostname or IP'''
if self.mode == verify_mode.ignore:
return True
# Special formatting for non-standard ports...
if ':' not in hostname:
lookup_name = hostname
elif hostname.endswith(':22'):
lookup_name = hostname[:-3]
else:
host_base, port_base = hostname.rsplit(':', 1)
lookup_name = '[%s]:%s' % (host_base, port_base)
# Try remainder of host verification with locking
self.lock.acquire()
if self.hostkeys.check(lookup_name, key):
self.lock.release()
return True
host_entry = self.hostkeys.lookup(lookup_name)
actual = printable_fingerprint(key)
if host_entry and key.get_name() in host_entry:
# Entry mismatch
expected = printable_fingerprint(host_entry[key.get_name()])
print('Host key mismatch for (%s)' % lookup_name)
print('Expected:', expected)
print('Got :', actual)
if self.mode == verify_mode.overwrite_blindly:
print('Blindly accepting updated host key for %s' % lookup_name)
self.hostkeys.add(lookup_name, key.get_name(), key)
self.hostkeys.save(self.known_hosts_file)
self.lock.release()
return True
else:
# Missing key
if self.mode == verify_mode.reject:
self.lock.release()
return False
accept_and_add = False
if self.mode == verify_mode.prompt:
print('Unverified connection to "%s"' % lookup_name)
print('(Host Key Fingerprint [%s])' % actual)
answer = input('Do you want to accept this key? (y/N): ')
if answer[0].upper() == 'Y':
accept_and_add = True
if self.mode in (verify_mode.accept_new, verify_mode.overwrite_blindly):
accept_and_add = True
if accept_and_add:
print('Accepting new host key for %s' % lookup_name)
self.hostkeys.add(lookup_name, key.get_name(), key)
self.hostkeys.save(self.known_hosts_file)
self.lock.release()
return True
self.lock.release()
return False
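CodeMap keeps a forward (name -> code) and a reverse (code -> name) table, so verification modes can be referenced either way. A short sketch of how it behaves, assuming the module above is importable:

# Forward lookup by name, reverse lookup by code, plus plain attributes
assert verify_mode.code('reject') == 0
assert verify_mode.name(100) == 'ignore'
assert verify_mode.accept_new == 2  # the kwargs also become attributes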
| 39.462687
| 167
| 0.625946
| 672
| 5,288
| 4.793155
| 0.334821
| 0.037256
| 0.039118
| 0.039118
| 0.189072
| 0.092518
| 0.092518
| 0.073269
| 0.073269
| 0.073269
| 0
| 0.010493
| 0.279123
| 5,288
| 133
| 168
| 39.759399
| 0.83447
| 0.250946
| 0
| 0.222222
| 0
| 0.011111
| 0.098974
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.044444
| 0
| 0.244444
| 0.155556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbfc3c9f59db54005f9a1ad67dd376c6806f7fa6
| 14,153
|
py
|
Python
|
nuke/pymmh3.py
|
jfpanisset/Cryptomatte
|
d7c71cff17a4e8895eb17520115aa45ff66b8540
|
[
"BSD-3-Clause"
] | 543
|
2016-07-07T15:31:01.000Z
|
2022-03-31T10:58:32.000Z
|
nuke/pymmh3.py
|
jfpanisset/Cryptomatte
|
d7c71cff17a4e8895eb17520115aa45ff66b8540
|
[
"BSD-3-Clause"
] | 143
|
2016-07-07T16:56:38.000Z
|
2022-02-23T23:16:52.000Z
|
nuke/pymmh3.py
|
jfpanisset/Cryptomatte
|
d7c71cff17a4e8895eb17520115aa45ff66b8540
|
[
"BSD-3-Clause"
] | 158
|
2016-07-07T16:41:49.000Z
|
2022-03-21T17:57:28.000Z
|
'''
pymmh3 was written by Fredrik Kihlander and enhanced by Swapnil Gusani, and is placed in the public
domain. The authors hereby disclaim copyright to this source code.
pure python implementation of the murmur3 hash algorithm
https://code.google.com/p/smhasher/wiki/MurmurHash3
This was written for the times when you do not want to compile c-code and install modules,
and you only want a drop-in murmur3 implementation.
As this is pure Python it is FAR from performant; if performance is needed,
a proper c-module is suggested!
This module is written to have the same format as mmh3 python package found here for simple conversions:
https://pypi.python.org/pypi/mmh3/2.3.1
'''
import sys as _sys
if (_sys.version_info > (3, 0)):
def xrange( a, b, c ):
return list(range( a, b, c))
def xencode(x):
if isinstance(x, bytes) or isinstance(x, bytearray):
return x
else:
return x.encode()
else:
def xencode(x):
return x
del _sys
def hash( key, seed = 0x0 ):
''' Implements 32bit murmur3 hash. '''
key = bytearray( xencode(key) )
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in range( 0, nblocks * 4, 4 ):
# assemble a 32-bit block from four bytes (little-endian byte order)
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
unsigned_val = fmix( h1 ^ length )
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )
def hash128( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. '''
def hash128_x64( key, seed ):
''' Implements 128bit murmur3 hash for x64. '''
def fmix( k ):
k ^= k >> 33
k = ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
k = ( k * 0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
return k
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
c1 = 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
#body
for block_start in range( 0, nblocks * 8, 8 ):
# assemble two 64-bit blocks from sixteen bytes (little-endian byte order)
k1 = key[ 2 * block_start + 7 ] << 56 | \
key[ 2 * block_start + 6 ] << 48 | \
key[ 2 * block_start + 5 ] << 40 | \
key[ 2 * block_start + 4 ] << 32 | \
key[ 2 * block_start + 3 ] << 24 | \
key[ 2 * block_start + 2 ] << 16 | \
key[ 2 * block_start + 1 ] << 8 | \
key[ 2 * block_start + 0 ]
k2 = key[ 2 * block_start + 15 ] << 56 | \
key[ 2 * block_start + 14 ] << 48 | \
key[ 2 * block_start + 13 ] << 40 | \
key[ 2 * block_start + 12 ] << 32 | \
key[ 2 * block_start + 11 ] << 24 | \
key[ 2 * block_start + 10 ] << 16 | \
key[ 2 * block_start + 9 ] << 8 | \
key[ 2 * block_start + 8 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( c2 * k1 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
h1 = ( h1 << 27 | h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = ( h1 * 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
h2 = ( h2 << 31 | h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
tail_size = length & 15
if tail_size >= 15:
k2 ^= key[ tail_index + 14 ] << 48
if tail_size >= 14:
k2 ^= key[ tail_index + 13 ] << 40
if tail_size >= 13:
k2 ^= key[ tail_index + 12 ] << 32
if tail_size >= 12:
k2 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k2 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k2 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k2 ^= key[ tail_index + 8 ]
if tail_size > 8:
k2 = ( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( k2 * c1 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
if tail_size >= 8:
k1 ^= key[ tail_index + 7 ] << 56
if tail_size >= 7:
k1 ^= key[ tail_index + 6 ] << 48
if tail_size >= 6:
k1 ^= key[ tail_index + 5 ] << 40
if tail_size >= 5:
k1 ^= key[ tail_index + 4 ] << 32
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( k1 * c2 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
return ( h2 << 64 | h1 )
def hash128_x86( key, seed ):
''' Implements 128bit murmur3 hash for x86. '''
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
h3 = seed
h4 = seed
c1 = 0x239b961b
c2 = 0xab0e9789
c3 = 0x38b34ae5
c4 = 0xa1e38b93
#body
for block_start in range( 0, nblocks * 16, 16 ):
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k2 = key[ block_start + 7 ] << 24 | \
key[ block_start + 6 ] << 16 | \
key[ block_start + 5 ] << 8 | \
key[ block_start + 4 ]
k3 = key[ block_start + 11 ] << 24 | \
key[ block_start + 10 ] << 16 | \
key[ block_start + 9 ] << 8 | \
key[ block_start + 8 ]
k4 = key[ block_start + 15 ] << 24 | \
key[ block_start + 14 ] << 16 | \
key[ block_start + 13 ] << 8 | \
key[ block_start + 12 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 19 | h1 >> 13 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( c3 * k2 ) & 0xFFFFFFFF
h2 ^= k2
h2 = ( h2 << 17 | h2 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
h2 = ( h2 + h3 ) & 0xFFFFFFFF
h2 = ( h2 * 5 + 0x0bcaa747 ) & 0xFFFFFFFF
k3 = ( c3 * k3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( c4 * k3 ) & 0xFFFFFFFF
h3 ^= k3
h3 = ( h3 << 15 | h3 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
h3 = ( h3 + h4 ) & 0xFFFFFFFF
h3 = ( h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF
k4 = ( c4 * k4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( c1 * k4 ) & 0xFFFFFFFF
h4 ^= k4
h4 = ( h4 << 13 | h4 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h4 = ( h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
k3 = 0
k4 = 0
tail_size = length & 15
if tail_size >= 15:
k4 ^= key[ tail_index + 14 ] << 16
if tail_size >= 14:
k4 ^= key[ tail_index + 13 ] << 8
if tail_size >= 13:
k4 ^= key[ tail_index + 12 ]
if tail_size > 12:
k4 = ( k4 * c4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( k4 * c1 ) & 0xFFFFFFFF
h4 ^= k4
if tail_size >= 12:
k3 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k3 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k3 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k3 ^= key[ tail_index + 8 ]
if tail_size > 8:
k3 = ( k3 * c3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( k3 * c4 ) & 0xFFFFFFFF
h3 ^= k3
if tail_size >= 8:
k2 ^= key[ tail_index + 7 ] << 24
if tail_size >= 7:
k2 ^= key[ tail_index + 6 ] << 16
if tail_size >= 6:
k2 ^= key[ tail_index + 5 ] << 8
if tail_size >= 5:
k2 ^= key[ tail_index + 4 ]
if tail_size > 4:
k2 = ( k2 * c2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( k2 * c3 ) & 0xFFFFFFFF
h2 ^= k2
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h3 ^= length
h4 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h3 = fmix( h3 )
h4 = fmix( h4 )
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
return ( h4 << 96 | h3 << 64 | h2 << 32 | h1 )
key = bytearray( xencode(key) )
if x64arch:
return hash128_x64( key, seed )
else:
return hash128_x86( key, seed )
def hash64( key, seed = 0x0, x64arch = True ):
''' Implements 64bit murmur3 hash. Returns a tuple. '''
hash_128 = hash128( key, seed, x64arch )
unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
if unsigned_val1 & 0x8000000000000000 == 0:
signed_val1 = unsigned_val1
else:
signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF
if unsigned_val2 & 0x8000000000000000 == 0:
signed_val2 = unsigned_val2
else:
signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
return ( int( signed_val1 ), int( signed_val2 ) )
def hash_bytes( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. Returns a byte string. '''
hash_128 = hash128( key, seed, x64arch )
bytestring = ''
for i in range(0, 16, 1):
lsbyte = hash_128 & 0xFF
bytestring = bytestring + str( chr( lsbyte ) )
hash_128 = hash_128 >> 8
return bytestring
if __name__ == "__main__":
import argparse
import sys # module-level sys was imported as _sys and deleted above
parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] "string to hash"' )
parser.add_argument( '--seed', type = int, default = 0 )
parser.add_argument( 'strings', default = [], nargs='+')
opts = parser.parse_args()
for str_to_hash in opts.strings:
sys.stdout.write( '"%s" = 0x%08X\n' % ( str_to_hash, hash( str_to_hash ) ) )
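Two cheap sanity checks follow directly from the mixing structure above: with empty input and seed 0 every step operates on zeros, so all variants finalize to 0, and 32-bit results are folded into the signed range like the mmh3 C extension. A sketch, assuming the module above is importable:

assert hash('') == 0
assert hash128('') == 0
assert hash64('') == (0, 0)
# signed fold: 32-bit results always land in [-2**31, 2**31 - 1]
assert -2**31 <= hash('some key') < 2**31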
| 31.311947
| 104
| 0.464283
| 1,633
| 14,153
| 3.920392
| 0.142682
| 0.053733
| 0.06248
| 0.034989
| 0.473133
| 0.406748
| 0.385036
| 0.365979
| 0.328491
| 0.27054
| 0
| 0.146457
| 0.419628
| 14,153
| 451
| 105
| 31.381375
| 0.632944
| 0.096093
| 0
| 0.518182
| 0
| 0
| 0.006369
| 0
| 0
| 0
| 0.111496
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.006061
| 0.006061
| 0.087879
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbfc56fb832ee5fc9af604dacd2a35c059519b31
| 950
|
py
|
Python
|
bindings/python/tests/test_factory.py
|
pscff/dlite
|
4365d828dcaa1736cc78ff6ed9a65592f198ba25
|
[
"MIT"
] | 10
|
2020-04-08T06:25:27.000Z
|
2022-03-15T06:54:53.000Z
|
bindings/python/tests/test_factory.py
|
pscff/dlite
|
4365d828dcaa1736cc78ff6ed9a65592f198ba25
|
[
"MIT"
] | 117
|
2019-12-16T14:43:41.000Z
|
2022-03-21T19:46:58.000Z
|
bindings/python/tests/test_factory.py
|
pscff/dlite
|
4365d828dcaa1736cc78ff6ed9a65592f198ba25
|
[
"MIT"
] | 5
|
2020-04-15T16:23:29.000Z
|
2021-12-07T08:40:54.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import dlite
thisdir = os.path.abspath(os.path.dirname(__file__))
class Person:
def __init__(self, name, age, skills):
self.name = name
self.age = age
self.skills = skills
def __repr__(self):
return 'Person(%r, %r, %r)' % (self.name, self.age, list(self.skills))
url = 'json://' + thisdir + '/Person.json'
print('-- create: ExPerson')
ExPerson = dlite.classfactory(Person, url=url)
print('-- create: person1')
person1 = Person('Jack Daniel', 42, ['distilling', 'tasting'])
print('-- create: person2')
person2 = ExPerson('Jack Daniel', 42, ['distilling', 'tasting'])
person2.dlite_inst.save('json', 'persons.json', 'mode=w')
# Print json-representation of person2 using dlite
print(person2.dlite_inst.asjson(indent=2))
person3 = dlite.loadfactory(Person, 'json://persons.json')
person4 = dlite.objectfactory(person1, meta=person2.dlite_meta)
| 25
| 78
| 0.671579
| 123
| 950
| 5.065041
| 0.455285
| 0.038523
| 0.035313
| 0.070626
| 0.093098
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021144
| 0.153684
| 950
| 37
| 79
| 25.675676
| 0.753731
| 0.095789
| 0
| 0
| 0
| 0
| 0.220794
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.095238
| 0.047619
| 0.285714
| 0.190476
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbfcfb1df1954ace1963bc30983b96adb222d711
| 807
|
py
|
Python
|
week_11_DS_N_Algorithm/03_Thr_Lecture/실습6_연속 부분 최대합.py
|
bky373/elice-racer-1st
|
ddea8079a1083796ed4f59c38650ff8f4333e6ef
|
[
"FSFAP"
] | 1
|
2021-11-03T18:27:37.000Z
|
2021-11-03T18:27:37.000Z
|
week_11_DS_N_Algorithm/03_Thr_Lecture/실습6_연속 부분 최대합.py
|
bky373/elice-racer-1st
|
ddea8079a1083796ed4f59c38650ff8f4333e6ef
|
[
"FSFAP"
] | null | null | null |
week_11_DS_N_Algorithm/03_Thr_Lecture/실습6_연속 부분 최대합.py
|
bky373/elice-racer-1st
|
ddea8079a1083796ed4f59c38650ff8f4333e6ef
|
[
"FSFAP"
] | 1
|
2021-02-10T15:21:53.000Z
|
2021-02-10T15:21:53.000Z
|
'''
Maximum contiguous subsequence sum
Given n numbers, write a program that selects a contiguous part of the sequence so that its sum is maximized.
For example, suppose the following 8 numbers are given.
1 2 -4 5 3 -2 9 -10
Here, a contiguous part means a run of consecutive numbers from the sequence.
Possible contiguous parts include [1, 2, -4], [5, 3, -2, 9], and [9, -10].
Among these, the contiguous part with the largest sum is [5, 3, -2, 9],
and no other choice yields a larger sum.
Therefore the maximum contiguous subsequence sum is 5+3+(-2)+9 = 15.
Sample input
1 2 -4 5 3 -2 9 -10
Sample output
15
Constraints
At most 100 numbers are given as input.
'''
import sys
def getSubsum(data) :
'''
Given n numbers as a list, return the maximum contiguous subsequence sum.
'''
dp = [0] * len(data)
dp[0] = data[0]
for i in range(1, len(data)):
dp[i] = max(dp[i-1] + data[i], data[i])
return max(dp)
def main():
'''
Do not modify this part.
'''
data = [int(x) for x in input().split()]
print(getSubsum(data))
if __name__ == "__main__":
main()
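The recurrence in getSubsum is Kadane's algorithm: dp[i] holds the best sum of a contiguous run ending at index i. Tracing the sample input shows where 15 comes from (a sketch, assuming the module above is importable):

data = [1, 2, -4, 5, 3, -2, 9, -10]
# dp per index: [1, 3, -1, 5, 8, 6, 15, 5]; the 15 is the run [5, 3, -2, 9]
assert getSubsum(data) == 15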
| 17.170213
| 61
| 0.537794
| 174
| 807
| 2.448276
| 0.568966
| 0.023474
| 0.035211
| 0.046948
| 0.058685
| 0.058685
| 0.058685
| 0.042254
| 0
| 0
| 0
| 0.087189
| 0.303594
| 807
| 46
| 62
| 17.543478
| 0.670819
| 0.556382
| 0
| 0
| 0
| 0
| 0.025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.333333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbfd45a1262d4d81ad4ca682e226d591f37c7fd4
| 1,490
|
py
|
Python
|
tests/conftest.py
|
zhongnansu/es-cli
|
e0656c21392e52a8b9cfafa69acfa0c13b743a9c
|
[
"Apache-2.0"
] | 6
|
2019-08-23T18:06:41.000Z
|
2020-05-06T18:26:53.000Z
|
tests/conftest.py
|
zhongnansu/es-cli
|
e0656c21392e52a8b9cfafa69acfa0c13b743a9c
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
zhongnansu/es-cli
|
e0656c21392e52a8b9cfafa69acfa0c13b743a9c
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019, Amazon Web Services Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
We can define the fixture functions in this file to make them
accessible across multiple test modules.
"""
import os
import pytest
from utils import create_index, delete_index, get_connection
@pytest.fixture(scope="function")
def connection():
test_connection = get_connection()
create_index(test_connection)
yield test_connection
delete_index(test_connection)
@pytest.fixture(scope="function")
def default_config_location():
from escli.conf import __file__ as package_root
package_root = os.path.dirname(package_root)
default_config = os.path.join(package_root, "esclirc")
yield default_config
@pytest.fixture(scope="session", autouse=True)
def temp_config(tmpdir_factory):
# this function runs on start of test session.
# use temporary directory for conf home so user conf will not be used
os.environ["XDG_CONFIG_HOME"] = str(tmpdir_factory.mktemp("data"))
| 29.8
| 73
| 0.769799
| 219
| 1,490
| 5.114155
| 0.561644
| 0.053571
| 0.048214
| 0.028571
| 0.069643
| 0.069643
| 0
| 0
| 0
| 0
| 0
| 0.006364
| 0.156376
| 1,490
| 49
| 74
| 30.408163
| 0.884646
| 0.454362
| 0
| 0.111111
| 0
| 0
| 0.070605
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.222222
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbfd8140aa71c6ce6288cd86d96c8cf8754cf91f
| 26,845
|
py
|
Python
|
Cogs/ServerStats.py
|
Damiian1/techwizardshardware
|
97ceafc15036be4136e860076d73d74f1887f041
|
[
"MIT"
] | null | null | null |
Cogs/ServerStats.py
|
Damiian1/techwizardshardware
|
97ceafc15036be4136e860076d73d74f1887f041
|
[
"MIT"
] | null | null | null |
Cogs/ServerStats.py
|
Damiian1/techwizardshardware
|
97ceafc15036be4136e860076d73d74f1887f041
|
[
"MIT"
] | null | null | null |
import asyncio
import discord
from datetime import datetime
from operator import itemgetter
from discord.ext import commands
from Cogs import Nullify
from Cogs import DisplayName
from Cogs import UserTime
from Cogs import Message
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(ServerStats(bot, settings))
class ServerStats:
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
async def message(self, message):
# Check the message and see if we should allow it - always yes.
# This module doesn't need to cancel messages.
# Don't count your own, Pooter
if not message.author.id == self.bot.user.id:
server = message.guild
messages = self.settings.getServerStat(server, "TotalMessages")
# Default a missing stat to 0 before casting - int(None) would raise TypeError
if messages == None:
messages = 0
messages = int(messages)
messages += 1
self.settings.setServerStat(server, "TotalMessages", messages)
return { 'Ignore' : False, 'Delete' : False}
@commands.command(pass_context=True)
async def serverinfo(self, ctx, *, guild_name = None):
"""Lists some info about the current or passed server."""
# Check if we passed another guild
guild = None
if guild_name == None:
guild = ctx.guild
else:
for g in self.bot.guilds:
if g.name.lower() == guild_name.lower():
guild = g
break
if str(g.id) == str(guild_name):
guild = g
break
if guild == None:
# We didn't find it
await ctx.send("I couldn't find that guild...")
return
server_embed = discord.Embed(color=ctx.author.color)
server_embed.title = guild.name
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, guild.created_at)
time_str = "{} {}".format(local_time['time'], local_time['zone'])
server_embed.description = "Created at {}".format(time_str)
online_members = 0
bot_member = 0
bot_online = 0
for member in guild.members:
if member.bot:
bot_member += 1
if not member.status == discord.Status.offline:
bot_online += 1
continue
if not member.status == discord.Status.offline:
online_members += 1
# bot_percent = "{:,g}%".format((bot_member/len(guild.members))*100)
user_string = "{:,}/{:,} online ({:,g}%)".format(
online_members,
len(guild.members) - bot_member,
round((online_members/(len(guild.members) - bot_member) * 100), 2)
)
b_string = "bot" if bot_member == 1 else "bots"
user_string += "\n{:,}/{:,} {} online ({:,g}%)".format(
bot_online,
bot_member,
b_string,
round((bot_online/bot_member)*100, 2)
)
#server_embed.add_field(name="Members", value="{:,}/{:,} online ({:.2f}%)\n{:,} {} ({}%)".format(online_members, len(guild.members), bot_percent), inline=True)
server_embed.add_field(name="Members ({:,} total)".format(len(guild.members)), value=user_string, inline=True)
server_embed.add_field(name="Roles", value=str(len(guild.roles)), inline=True)
chandesc = "{:,} text, {:,} voice".format(len(guild.text_channels), len(guild.voice_channels))
server_embed.add_field(name="Channels", value=chandesc, inline=True)
server_embed.add_field(name="Default Role", value=guild.default_role, inline=True)
server_embed.add_field(name="Owner", value=guild.owner.name + "#" + guild.owner.discriminator, inline=True)
server_embed.add_field(name="AFK Channel", value=guild.afk_channel, inline=True)
server_embed.add_field(name="Verification", value=guild.verification_level, inline=True)
server_embed.add_field(name="Voice Region", value=guild.region, inline=True)
server_embed.add_field(name="Considered Large", value=guild.large, inline=True)
# Find out where in our join position this server is
joinedList = []
popList = []
for g in self.bot.guilds:
joinedList.append({ 'ID' : g.id, 'Joined' : g.me.joined_at })
popList.append({ 'ID' : g.id, 'Population' : len(g.members) })
# sort the guilds by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
popList = sorted(popList, key=lambda x:x['Population'], reverse=True)
check_item = { "ID" : guild.id, "Joined" : guild.me.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
server_embed.add_field(name="Join Position", value="{:,} of {:,}".format(position, total), inline=True)
# Get our population position
check_item = { "ID" : guild.id, "Population" : len(guild.members) }
total = len(popList)
position = popList.index(check_item) + 1
server_embed.add_field(name="Population Rank", value="{:,} of {:,}".format(position, total), inline=True)
emojitext = ""
emojicount = 0
for emoji in guild.emojis:
if emoji.animated:
emojiMention = "<a:"+emoji.name+":"+str(emoji.id)+">"
else:
emojiMention = "<:"+emoji.name+":"+str(emoji.id)+">"
test = emojitext + emojiMention
if len(test) > 1024:
# TOOO BIIIIIIIIG
emojicount += 1
if emojicount == 1:
ename = "Emojis ({:,} total)".format(len(guild.emojis))
else:
ename = "Emojis (Continued)"
server_embed.add_field(name=ename, value=emojitext, inline=True)
emojitext=emojiMention
else:
emojitext = emojitext + emojiMention
if len(emojitext):
if emojicount == 0:
emojiname = "Emojis ({} total)".format(len(guild.emojis))
else:
emojiname = "Emojis (Continued)"
server_embed.add_field(name=emojiname, value=emojitext, inline=True)
if len(guild.icon_url):
server_embed.set_thumbnail(url=guild.icon_url)
else:
# No Icon
server_embed.set_thumbnail(url=ctx.author.default_avatar_url)
server_embed.set_footer(text="Server ID: {}".format(guild.id))
await ctx.channel.send(embed=server_embed)
@commands.command(pass_context=True)
async def sharedservers(self, ctx, *, member = None):
"""Lists how many servers you share with the bot."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member == None:
member = ctx.author
if type(member) is str:
member_check = DisplayName.memberForName(member, ctx.guild)
if not member_check:
msg = "I couldn't find *{}* on this server...".format(member)
if suppress:
msg = Nullify.clean(msg)
await ctx.send(msg)
return
member = member_check
if member.id == self.bot.user.id:
count = len(self.bot.guilds)
if count == 1:
await ctx.send("I'm on *1* server. :blush:")
else:
await ctx.send("I'm on *{}* servers. :blush:".format(count))
return
count = 0
for guild in self.bot.guilds:
for mem in guild.members:
if mem.id == member.id:
count += 1
if ctx.author.id == member.id:
targ = "You share"
else:
targ = "*{}* shares".format(DisplayName.name(member))
if count == 1:
await ctx.send("{} *1* server with me. :blush:".format(targ))
else:
await ctx.send("{} *{}* servers with me. :blush:".format(targ, count))
@commands.command(pass_context=True)
async def listservers(self, ctx, number : int = 10):
"""Lists the servers I'm connected to - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
i = 1
msg = '__**Servers I\'m On:**__\n\n'
for server in self.bot.guilds:
if i > number:
break
msg += '{}. *{}*\n'.format(i, server.name)
i += 1
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def topservers(self, ctx, number : int = 10):
"""Lists the top servers I'm connected to ordered by population - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
serverList = []
for server in self.bot.guilds:
memberCount = 0
for member in server.members:
memberCount += 1
serverList.append({ 'Name' : server.name, 'Users' : memberCount })
# sort the servers by population
serverList = sorted(serverList, key=lambda x:int(x['Users']), reverse=True)
if number > len(serverList):
number = len(serverList)
i = 1
msg = ''
for server in serverList:
if i > number:
break
msg += '{}. *{}* - *{:,}* members\n'.format(i, server['Name'], server['Users'])
i += 1
if number < len(serverList):
msg = '__**Top {} of {} Servers:**__\n\n'.format(number, len(serverList))+msg
else:
msg = '__**Top {} Servers:**__\n\n'.format(len(serverList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def bottomservers(self, ctx, number : int = 10):
"""Lists the bottom servers I'm connected to ordered by population - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
serverList = []
for server in self.bot.guilds:
serverList.append({ 'Name' : server.name, 'Users' : len(server.members) })
# sort the servers by population
serverList = sorted(serverList, key=lambda x:int(x['Users']))
if number > len(serverList):
number = len(serverList)
i = 1
msg = ''
for server in serverList:
if i > number:
break
msg += '{}. *{}* - *{:,}* members\n'.format(i, server['Name'], server['Users'])
i += 1
if number < len(serverList):
msg = '__**Bottom {} of {} Servers:**__\n\n'.format(number, len(serverList))+msg
else:
msg = '__**Bottom {} Servers:**__\n\n'.format(len(serverList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def users(self, ctx):
"""Lists the total number of users on all servers I'm connected to."""
message = await Message.EmbedText(title="Counting users...", color=ctx.message.author).send(ctx)
servers = members = membersOnline = bots = botsOnline = 0
counted_users = []
counted_bots = []
for server in self.bot.guilds:
servers += 1
for member in server.members:
if member.bot:
bots += 1
if not member.id in counted_bots:
counted_bots.append(member.id)
if not member.status == discord.Status.offline:
botsOnline += 1
else:
members += 1
if not member.id in counted_users:
counted_users.append(member.id)
if not member.status == discord.Status.offline:
membersOnline += 1
await Message.Embed(
title="Member Stats",
description="Current User Information".format(server.name),
fields=[
{ "name" : "Servers", "value" : "└─ {:,}".format(servers), "inline" : False },
{ "name" : "Users", "value" : "└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)".format(membersOnline, members, round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100, 2)), "inline" : False},
{ "name" : "Bots", "value" : "└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)".format(botsOnline, bots, round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100, 2)), "inline" : False},
{ "name" : "Total", "value" : "└─ {:,}/{:,} online ({:,g}%)".format(membersOnline + botsOnline, members+bots, round(((membersOnline + botsOnline)/(members+bots))*100, 2)), "inline" : False}
],
color=ctx.message.author).edit(ctx, message)
'''userCount = 0
serverCount = 0
counted_users = []
message = await ctx.send("Counting users...")
for server in self.bot.guilds:
serverCount += 1
userCount += len(server.members)
for member in server.members:
if not member.id in counted_users:
counted_users.append(member.id)
await message.edit(content='There are *{:,} users* (*{:,}* unique) on the *{:,} servers* I am currently a part of!'.format(userCount, len(counted_users), serverCount))'''
@commands.command(pass_context=True)
async def joinpos(self, ctx, *, member = None):
"""Tells when a user joined compared to other users."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member == None:
member = ctx.author
if type(member) is str:
member_check = DisplayName.memberForName(member, ctx.guild)
if not member_check:
msg = "I couldn't find *{}* on this server...".format(member)
if suppress:
msg = Nullify.clean(msg)
await ctx.send(msg)
return
member = member_check
joinedList = []
for mem in ctx.message.guild.members:
joinedList.append({ 'ID' : mem.id, 'Joined' : mem.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
check_item = { "ID" : member.id, "Joined" : member.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
before = ""
after = ""
msg = "*{}'s* join position is **{:,}**.".format(DisplayName.name(member), position, total)
if position-1 == 1:
# We have previous members
before = "**1** user"
elif position-1 > 1:
before = "**{:,}** users".format(position-1)
if total-position == 1:
# There were users after as well
after = "**1** user"
elif total-position > 1:
after = "**{:,}** users".format(total-position)
# Build the string!
if len(before) and len(after):
# Got both
msg += "\n\n{} joined before, and {} after.".format(before, after)
elif len(before):
# Just got before
msg += "\n\n{} joined before.".format(before)
elif len(after):
# Just after
msg += "\n\n{} joined after.".format(after)
await ctx.send(msg)
@commands.command(pass_context=True)
async def firstjoins(self, ctx, number : int = 10):
"""Lists the first users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No users! Just like you wanted!')
return
joinedList = []
for member in ctx.message.guild.members:
joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
msg += '{}. *{}* - *{}*\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str)
i += 1
if number < len(joinedList):
msg = '__**First {} of {} Members to Join:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**First {} Members to Join:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def recentjoins(self, ctx, number : int = 10):
"""Lists the most recent users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No users! Just like you wanted!')
return
joinedList = []
for member in ctx.message.guild.members:
joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True)
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
msg += '{}. *{}* - *{}*\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str)
i += 1
if number < len(joinedList):
msg = '__**Last {} of {} Members to Join:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**Last {} Members to Join:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def firstservers(self, ctx, number : int = 10):
"""Lists the first servers I've joined - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
joinedList = []
for guild in self.bot.guilds:
botmember = DisplayName.memberForID(self.bot.user.id, guild)
joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) })
# sort the servers by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
if member['Members'] == 1:
msg += '{}. *{}* - *{}* - *(1 member)*\n'.format(i, member['Name'], time_str)
else:
msg += '{}. *{}* - *{}* - *({} members)*\n'.format(i, member['Name'], time_str, member['Members'])
i += 1
if number < len(joinedList):
msg = '__**First {} of {} Servers I Joined:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**First {} Servers I Joined:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def recentservers(self, ctx, number : int = 10):
"""Lists the most recent users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
joinedList = []
for guild in self.bot.guilds:
botmember = DisplayName.memberForID(self.bot.user.id, guild)
joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) })
# sort the servers by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True)
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
if member['Members'] == 1:
msg += '{}. *{}* - *{}* - *(1 member)*\n'.format(i, member['Name'], time_str)
else:
msg += '{}. *{}* - *{}* - *({} members)*\n'.format(i, member['Name'], time_str, member['Members'])
i += 1
if number < len(joinedList):
msg = '__**Last {} of {} Servers I Joined:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**Last {} Servers I Joined:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def messages(self, ctx):
"""Lists the number of messages I've seen on this sever so far. (only applies after this module's inception, and if I'm online)"""
messages = int(self.settings.getServerStat(ctx.message.guild, "TotalMessages"))
messages -= 1
self.settings.setServerStat(ctx.message.guild, "TotalMessages", messages)
if messages == None:
messages = 0
if messages == 1:
await ctx.channel.send('So far, I\'ve witnessed *{:,} message!*'.format(messages))
else:
await ctx.channel.send('So far, I\'ve witnessed *{:,} messages!*'.format(messages))
@commands.command(pass_context=True)
async def allmessages(self, ctx):
"""Lists the number of messages I've seen on all severs so far. (only applies after this module's inception, and if I'm online)"""
messages = 0
for guild in self.bot.guilds:
temp = 0 if self.settings.getServerStat(guild, "TotalMessages") is None else self.settings.getServerStat(guild, "TotalMessages")
messages += int(temp)
messages -= 1
if messages == 1:
await ctx.channel.send('So far, I\'ve witnessed *{:,} message across all servers!*'.format(messages))
else:
await ctx.channel.send('So far, I\'ve witnessed *{:,} messages across all servers!*'.format(messages))
# Set our message count locally -1
messages = int(self.settings.getServerStat(ctx.message.guild, "TotalMessages"))
messages -= 1
self.settings.setServerStat(ctx.message.guild, "TotalMessages", messages)
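The join-position logic shared by serverinfo, joinpos, and the first/recent listings reduces to sorting dicts by a key and taking a 1-based index. A stripped-down sketch with hypothetical timestamps:

joined = [
    {'ID': 1, 'Joined': 30},
    {'ID': 2, 'Joined': 10},
    {'ID': 3, 'Joined': 20},
]
joined = sorted(joined, key=lambda x: x['Joined'])
position = joined.index({'ID': 3, 'Joined': 20}) + 1  # 1-based rank
assert position == 2  # only ID 2 joined earlier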
| 42.076803
| 254
| 0.537568
| 2,975
| 26,845
| 4.783529
| 0.099496
| 0.01574
| 0.020027
| 0.025367
| 0.684211
| 0.63699
| 0.614012
| 0.537489
| 0.530181
| 0.518727
| 0
| 0.010217
| 0.33645
| 26,845
| 637
| 255
| 42.142857
| 0.788245
| 0.062209
| 0
| 0.593291
| 0
| 0.012579
| 0.117514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004193
| false
| 0.027254
| 0.018868
| 0
| 0.050314
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbfe9381e4f6dcc57fd5c5d02265d7f565b40315
| 2,857
|
py
|
Python
|
torch_datasets/samplers/balanced_batch_sampler.py
|
mingruimingrui/torch-datasets
|
2640b8c4fa82156e68e617fc545a546b4e08dc4e
|
[
"MIT"
] | null | null | null |
torch_datasets/samplers/balanced_batch_sampler.py
|
mingruimingrui/torch-datasets
|
2640b8c4fa82156e68e617fc545a546b4e08dc4e
|
[
"MIT"
] | null | null | null |
torch_datasets/samplers/balanced_batch_sampler.py
|
mingruimingrui/torch-datasets
|
2640b8c4fa82156e68e617fc545a546b4e08dc4e
|
[
"MIT"
] | null | null | null |
import random
import torch.utils.data.sampler
class BalancedBatchSampler(torch.utils.data.sampler.BatchSampler):
def __init__(
self,
dataset_labels,
batch_size=1,
steps=None,
n_classes=0,
n_samples=2
):
""" Create a balanced batch sampler for label based datasets
Args
dataset_labels : Labels of every entry from a dataset (in the same sequence)
batch_size : Number of entries per batch
steps : Number of batches to generate (if None, then dataset_size / batch_size will be used)
n_classes : Number of classes
n_samples : Number of samples per class
*** If batch_size > n_classes * n_samples, rest of batch will be randomly filled
"""
self.batch_size = batch_size
self.steps = len(dataset_labels) // batch_size if steps is None else steps
self.n_classes = n_classes
self.n_samples = n_samples
# Create a label_to_entry_ids table
self.label_to_entry_ids = {}
for entry_id, label in enumerate(dataset_labels):
if label in self.label_to_entry_ids:
self.label_to_entry_ids[label].append(entry_id)
else:
self.label_to_entry_ids[label] = [entry_id]
# Subset the labels with more than n_samples entries
self.labels_subset = [label for (label, entry_ids) in self.label_to_entry_ids.items() if len(entry_ids) >= n_samples]
assert len(self.labels_subset) >= n_classes, 'Too few labels have at least {} entries; choose a smaller n_classes or n_samples'.format(n_samples)
def _make_batch_ids(self):
batch_ids = []
# Choose classes and entries
labels_choosen = random.sample(self.labels_subset, self.n_classes)
# Randomly sample n_samples entries from each chosen label
for l in labels_choosen:
batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples)
if len(batch_ids) < self.batch_size:
# Randomly sample remainder
labels_choosen = {l: None for l in labels_choosen}
remaining_entry_ids = []
for label, entry_ids in self.label_to_entry_ids.items():
if label not in labels_choosen:
remaining_entry_ids += entry_ids
batch_ids += random.sample(remaining_entry_ids, self.batch_size - len(batch_ids))
# Randomly shuffle batch ids
batch_ids = random.sample(batch_ids, self.batch_size)
batch_ids = torch.LongTensor(batch_ids)
return batch_ids
def __iter__(self):
self.count = 0
while self.count < self.steps:
self.count += 1
yield self._make_batch_ids()
def __len__(self):
return self.steps
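A minimal usage sketch under assumed inputs (the labels are hypothetical; any label-per-entry sequence works, and the class above is assumed importable):

labels = [0] * 10 + [1] * 10 + [2] * 10  # 3 classes, 10 entries each
sampler = BalancedBatchSampler(labels, batch_size=6, n_classes=2, n_samples=2)

# Each batch: 2 samples from each of 2 randomly chosen classes,
# plus 2 random entries filling up to batch_size; steps = 30 // 6 = 5
for batch_ids in sampler:
    assert len(batch_ids) == 6

Because each iteration yields a full batch of indices, it plugs into torch.utils.data.DataLoader via the batch_sampler argument rather than sampler.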
| 38.608108
| 147
| 0.637382
| 381
| 2,857
| 4.493438
| 0.246719
| 0.070093
| 0.056075
| 0.070093
| 0.216122
| 0.129089
| 0.051402
| 0.051402
| 0.051402
| 0.051402
| 0
| 0.002475
| 0.292965
| 2,857
| 73
| 148
| 39.136986
| 0.84505
| 0.243962
| 0
| 0
| 0
| 0
| 0.035526
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 1
| 0.088889
| false
| 0
| 0.044444
| 0.022222
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbfe9b7374d292dd3a07ffc92b4ebb9e7af2ac5d
| 1,416
|
py
|
Python
|
ambari-common/src/main/python/resource_management/libraries/functions/get_bare_principal.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 1,664
|
2015-01-03T09:35:21.000Z
|
2022-03-31T04:55:24.000Z
|
ambari-common/src/main/python/resource_management/libraries/functions/get_bare_principal.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 3,018
|
2015-02-19T20:16:10.000Z
|
2021-11-13T20:47:48.000Z
|
ambari-common/src/main/python/resource_management/libraries/functions/get_bare_principal.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 1,673
|
2015-01-06T14:14:42.000Z
|
2022-03-31T07:22:30.000Z
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import re
__all__ = ["get_bare_principal"]
def get_bare_principal(normalized_principal_name):
"""
Given a normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the
primary component (nimbus)
:param normalized_principal_name: a string containing the principal name to process
:return: a string containing the primary component value or None if not valid
"""
bare_principal = None
if normalized_principal_name:
match = re.match(r"([^/@]+)(?:/[^@])?(?:@.*)?", normalized_principal_name)
if match:
bare_principal = match.group(1)
return bare_principal
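if __name__ == "__main__":
# Editor's usage sketch (addition, not part of the original file): a quick
# self-check of the regex behaviour on representative principals.
print(get_bare_principal("nimbus/c6501.ambari.apache.org@EXAMPLE.COM"))  # -> nimbus
print(get_bare_principal("hdfs@EXAMPLE.COM"))  # -> hdfs
print(get_bare_principal(None))  # -> None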
| 33.714286
| 97
| 0.764831
| 206
| 1,416
| 5.165049
| 0.529126
| 0.056391
| 0.108083
| 0.030075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007563
| 0.159605
| 1,416
| 42
| 98
| 33.714286
| 0.886555
| 0.759181
| 0
| 0
| 0
| 0
| 0.139241
| 0.082278
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbff3b375851c03b4ae31fb20b30423a4b9c6ad5
| 1,162
|
py
|
Python
|
04/cross_validation.01.py
|
study-machine-learning/dongheon.shin
|
6103ef9c73b162603bc39a27e4ecca0f1ac35e57
|
[
"MIT"
] | 2
|
2017-09-24T02:29:48.000Z
|
2017-10-05T11:15:22.000Z
|
04/cross_validation.01.py
|
study-machine-learning/dongheon.shin
|
6103ef9c73b162603bc39a27e4ecca0f1ac35e57
|
[
"MIT"
] | null | null | null |
04/cross_validation.01.py
|
study-machine-learning/dongheon.shin
|
6103ef9c73b162603bc39a27e4ecca0f1ac35e57
|
[
"MIT"
] | null | null | null |
from sklearn import svm, metrics
import random
import re
def split(rows):
data = []
labels = []
for row in rows:
data.append(row[0:4])
labels.append(row[4])
return (data, labels)
def calculate_score(train, test):
train_data, train_label = split(train)
test_data, test_label = split(test)
classifier = svm.SVC()
classifier.fit(train_data, train_label)
predict = classifier.predict(test_data)
return metrics.accuracy_score(test_label, predict)
def to_number(n):
return float(n) if re.match(r"^[0-9\.]+$", n) else n
def to_column(line):
return list(map(to_number, line.strip().split(",")))
lines = open("iris.csv", "r", encoding="utf-8").read().split("\n")
csv = [to_column(line) for line in lines if line.strip()]  # skip blank lines
del csv[0]  # drop the CSV header row
random.shuffle(csv)
k = 5
csv_k = [[] for i in range(k)]
scores = []
for i in range(len(csv)):
csv_k[i % k].append(csv[i])
for test in csv_k:
train = []
for data in csv_k:
if test != data:
train += data
score = calculate_score(train, test)
scores.append(score)
print("score = ", scores)
print("avg = ", sum(scores) / len(scores))
| 16.84058
| 66
| 0.620482
| 175
| 1,162
| 4.011429
| 0.354286
| 0.02849
| 0.054131
| 0.065527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008869
| 0.223752
| 1,162
| 68
| 67
| 17.088235
| 0.769401
| 0
| 0
| 0
| 0
| 0
| 0.035284
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.076923
| 0.051282
| 0.282051
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e004ab57c3294086a91490c226d7c40f3986ad7f
| 4,265
|
py
|
Python
|
src/reg_resampler.py
|
atif-hassan/Regression_ReSampling
|
194b2ae8efea7598a6690792d40e42aba74c111b
|
[
"BSD-3-Clause"
] | 15
|
2020-06-09T20:08:04.000Z
|
2021-11-21T15:58:09.000Z
|
src/reg_resampler.py
|
atif-hassan/Regression_ReSampling
|
194b2ae8efea7598a6690792d40e42aba74c111b
|
[
"BSD-3-Clause"
] | null | null | null |
src/reg_resampler.py
|
atif-hassan/Regression_ReSampling
|
194b2ae8efea7598a6690792d40e42aba74c111b
|
[
"BSD-3-Clause"
] | 5
|
2020-06-13T22:07:51.000Z
|
2021-05-21T03:26:45.000Z
|
class resampler:
def __init__(self):
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from collections import Counter
import numpy as np
self.bins = 3
self.pd = pd
self.LabelEncoder = LabelEncoder
self.Counter = Counter
self.X = 0
self.Y_classes = 0
self.target = 0
self.np = np
# This function adds classes to each sample and returns the class list as a dataframe/numpy array (as per input)
# It also merges classes as and when required
def fit(self, X, target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2):
self.bins = bins
tmp = target
# If data is numpy, then convert it into pandas
if type(target) == int:
if target < 0:
target = X.shape[1]+target
tmp = target
self.X = self.pd.DataFrame()
for i in range(X.shape[1]):
if i!=target:
self.X[str(i)] = X[:,i]
self.X["target"] = X[:,target]
target = "target"
else:
self.X = X.copy()
# Use qcut if balanced binning is required
if balanced_binning:
self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0)
else:
self.Y_classes = self.pd.cut(self.X[target], bins=self.bins)
# Pandas outputs ranges after binning. Convert ranges to classes
le = self.LabelEncoder()
self.Y_classes = le.fit_transform(self.Y_classes)
# Merge classes if number of neighbours is more than the number of samples
classes_count = list(map(list, self.Counter(self.Y_classes).items()))
classes_count = sorted(classes_count, key = lambda x: x[0])
mid_point = len(classes_count)
# Logic for merging
for i in range(len(classes_count)):
if classes_count[i][1] < min_n_samples:
self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0]
if verbose > 0:
print("INFO: Class " + str(classes_count[i][0]) + " has been merged into Class " + str(classes_count[i-1][0]) + " due to low number of samples")
classes_count[i][0] = classes_count[i-1][0]
if verbose > 0:
print()
# Perform label-encoding once again
# Avoids class skipping after merging
le = self.LabelEncoder()
self.Y_classes = le.fit_transform(self.Y_classes)
# Pretty print
if verbose > 1:
print("Class Distribution:\n-------------------")
classes_count = list(map(list, self.Counter(self.Y_classes).items()))
classes_count = sorted(classes_count, key = lambda x: x[0])
for class_, count in classes_count:
print(str(class_)+": "+str(count))
print()
# Finally concatenate and return as dataframe or numpy
# Based on what type of target was sent
self.X["classes"] = self.Y_classes
if type(tmp) == int:
self.target = tmp
else:
self.target = target
return self.Y_classes
# This function performs the re-sampling
def resample(self, sampler_obj, trainX, trainY):
# If classes haven't yet been created, then run the "fit" function
if type(self.Y_classes) == int:
print("Error! Run fit method first!!")
return None
# Finally, perform the re-sampling
resampled_data, _ = sampler_obj.fit_resample(trainX, trainY)
if type(resampled_data).__module__ == 'numpy':
resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop("classes", axis=1).columns)
# Return the correct X and Y
if type(self.target) == int:
return resampled_data.drop("target", axis=1).values, resampled_data["target"].values
else:
return resampled_data.drop(self.target, axis=1), resampled_data[self.target]
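# --- Editor's usage sketch (addition, not part of the original file). Any sampler
# exposing imblearn's `fit_resample` works as sampler_obj; the dataframe name,
# target column and SMOTE parameters below are assumptions.
#
# from imblearn.over_sampling import SMOTE
# rs = resampler()
# y_classes = rs.fit(df_train, target="target", bins=5)
# train_X, train_y = rs.resample(SMOTE(random_state=27), df_train, y_classes)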
| 41.813725
| 169
| 0.562251
| 537
| 4,265
| 4.351955
| 0.277467
| 0.082157
| 0.071887
| 0.023962
| 0.206247
| 0.154044
| 0.154044
| 0.154044
| 0.154044
| 0.154044
| 0
| 0.010604
| 0.336694
| 4,265
| 101
| 170
| 42.227723
| 0.815483
| 0.171864
| 0
| 0.25
| 0
| 0
| 0.053603
| 0.009959
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.055556
| 0
| 0.166667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0063a8b35dfc827fe158a159fe5d8b8ab703065
| 4,920
|
py
|
Python
|
get_data/speech_commands.py
|
patrick-kidger/generalised_shapelets
|
04930c89dc4673e2af402895fe67655f8375a808
|
[
"MIT"
] | 32
|
2020-05-31T17:41:58.000Z
|
2022-03-28T18:38:11.000Z
|
get_data/speech_commands.py
|
patrick-kidger/generalised_shapelets
|
04930c89dc4673e2af402895fe67655f8375a808
|
[
"MIT"
] | 1
|
2022-02-09T22:13:03.000Z
|
2022-02-09T23:55:28.000Z
|
get_data/speech_commands.py
|
patrick-kidger/generalised_shapelets
|
04930c89dc4673e2af402895fe67655f8375a808
|
[
"MIT"
] | 9
|
2020-07-17T16:50:24.000Z
|
2021-12-13T11:29:12.000Z
|
import os
import pathlib
import sklearn.model_selection
import tarfile
import torch
import torchaudio
import urllib.request
here = pathlib.Path(__file__).resolve().parent
def _split_data(tensor, stratify):
# 0.7/0.15/0.15 train/val/test split
(train_tensor, testval_tensor,
train_stratify, testval_stratify) = sklearn.model_selection.train_test_split(tensor, stratify,
train_size=0.7,
random_state=0,
shuffle=True,
stratify=stratify)
val_tensor, test_tensor = sklearn.model_selection.train_test_split(testval_tensor,
train_size=0.5,
random_state=1,
shuffle=True,
stratify=testval_stratify)
return train_tensor, val_tensor, test_tensor
def _save_data(dir, **tensors):
for tensor_name, tensor_value in tensors.items():
torch.save(tensor_value, str(dir / tensor_name) + '.pt')
def download():
base_base_loc = str(here / '../experiments/data')
if not os.path.exists(base_base_loc):
raise RuntimeError("data directory does not exist. Please create a directory called 'data' in the 'experiments'"
" directory. (We're going to put a lot of data there, so we don't make it automatically - "
"thus giving you the opportunity to make it a symlink rather than a normal directory, so "
"that the data can be stored elsewhere if you wish.)")
base_loc = base_base_loc + '/SpeechCommands'
loc = base_loc + '/speech_commands.tar.gz'
if os.path.exists(loc):
return
if not os.path.exists(base_loc):
os.mkdir(base_loc)
urllib.request.urlretrieve('http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz',
loc)
with tarfile.open(loc, 'r') as f:
f.extractall(base_loc)
def _process_data():
base_loc = here / '..' / 'experiments' / 'data' / 'SpeechCommands'
X = torch.empty(34975, 16000, 1)
y = torch.empty(34975, dtype=torch.long)
batch_index = 0
y_index = 0
for foldername in ('yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go'):
loc = base_loc / foldername
for filename in os.listdir(loc):
audio, _ = torchaudio.load_wav(loc / filename, channels_first=False,
normalization=False)  # for forward compatibility if they fix it
audio = audio / 2 ** 15 # Normalization argument doesn't seem to work so we do it manually.
# A few samples are shorter than the full length; for simplicity we discard them.
if len(audio) != 16000:
continue
X[batch_index] = audio
y[batch_index] = y_index
batch_index += 1
y_index += 1
assert batch_index == 34975, "batch_index is {}".format(batch_index)
audio_X = X
# X is of shape (batch=34975, length=16000, channels=1)
X = torchaudio.transforms.MFCC(log_mels=True)(X.squeeze(-1)).transpose(1, 2).detach()
# X is of shape (batch=34975, length=81, channels=40). For some crazy reason it requires a gradient, so detach.
train_X, _, _ = _split_data(X, y)
out = []
means = []
stds = []
for Xi, train_Xi in zip(X.unbind(dim=-1), train_X.unbind(dim=-1)):
mean = train_Xi.mean()
std = train_Xi.std()
means.append(mean)
stds.append(std)
out.append((Xi - mean) / (std + 1e-5))
X = torch.stack(out, dim=-1)
train_audio_X, val_audio_X, test_audio_X = _split_data(audio_X, y)
train_X, val_X, test_X = _split_data(X, y)
train_y, val_y, test_y = _split_data(y, y)
return train_X, val_X, test_X, train_y, val_y, test_y, torch.stack(means), torch.stack(stds), train_audio_X, \
val_audio_X, test_audio_X
def main():
download()
(train_X, val_X, test_X, train_y, val_y, test_y, means, stds, train_audio_X, val_audio_X,
test_audio_X) = _process_data()
loc = here / '..' / 'experiments' / 'data' / 'speech_commands_data'
if not os.path.exists(loc):
os.mkdir(loc)
_save_data(loc, train_X=train_X, val_X=val_X, test_X=test_X, train_y=train_y, val_y=val_y, test_y=test_y,
means=means, stds=stds, train_audio_X=train_audio_X, val_audio_X=val_audio_X, test_audio_X=test_audio_X)
if __name__ == '__main__':
main()
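# Editor's usage sketch (addition, not part of the original file): after running
# `python speech_commands.py`, the processed tensors can be reloaded from the
# paths used by _save_data above, e.g.
# data_dir = here / '..' / 'experiments' / 'data' / 'speech_commands_data'
# train_X = torch.load(str(data_dir / 'train_X') + '.pt')
# train_y = torch.load(str(data_dir / 'train_y') + '.pt')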
| 42.051282
| 120
| 0.566057
| 637
| 4,920
| 4.125589
| 0.301413
| 0.038813
| 0.020928
| 0.026636
| 0.186073
| 0.158295
| 0.089422
| 0.069635
| 0.060122
| 0.048706
| 0
| 0.02373
| 0.331911
| 4,920
| 116
| 121
| 42.413793
| 0.775783
| 0.078049
| 0
| 0.022472
| 0
| 0.011236
| 0.125414
| 0.005078
| 0
| 0
| 0
| 0
| 0.011236
| 1
| 0.05618
| false
| 0
| 0.078652
| 0
| 0.168539
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e008ab01b4020e37d916e20d303c66a51a23123e
| 5,652
|
py
|
Python
|
app/endpoints/products.py
|
duch94/spark_crud_test
|
94a514797700c2e929792f0424fb0e9e911489b7
|
[
"BSD-2-Clause"
] | null | null | null |
app/endpoints/products.py
|
duch94/spark_crud_test
|
94a514797700c2e929792f0424fb0e9e911489b7
|
[
"BSD-2-Clause"
] | null | null | null |
app/endpoints/products.py
|
duch94/spark_crud_test
|
94a514797700c2e929792f0424fb0e9e911489b7
|
[
"BSD-2-Clause"
] | null | null | null |
from datetime import datetime
from typing import List
from flask import Blueprint, jsonify, request, json
from app.models.products import Product, Category, products_categories
from app import db
products_blueprint = Blueprint('products', __name__)
def create_or_get_categories(p: dict) -> List[Category]:
"""
Func to get existing categories objects or create new otherwise
:param p: payload of request
:return: list of categories
"""
received_categories: List[Category] = [Category(name=cat) for cat in p['categories']]
categories = []
for cat in received_categories:
exists = db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0]
if exists:
existing_category = Category.query.filter(Category.name == cat.name).all()[0]
categories.append(existing_category)
else:
categories.append(cat)
return categories
@products_blueprint.route('/products', methods=['GET'])
def get_products():
return jsonify({
'results': [p.serialized for p in Product.query.all()]
})
@products_blueprint.route('/create_product', methods=['POST'])
def create_product():
data = request.get_data().decode('utf-8')
payload = json.loads(data)
datetime_format = '%Y-%m-%d %H:%M:%S'
if len(payload['categories']) < 1 or len(payload['categories']) > 5:
return '{"status": "error", "msg": "categories number must be between 1 and 5"}', 400
categories = create_or_get_categories(payload)
try:
new_prod = Product(name=payload['name'],
rating=float(payload['rating']),
featured=bool(payload['featured'] if 'featured' in payload.keys() else None),
expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format)
if ('expiration_date' in payload.keys()) else None),
brand_id=int(payload['brand_id']),
items_in_stock=int(payload['items_in_stock']),
receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format)
if ('receipt_date' in payload.keys()) else None))
except TypeError:
return '{"status": "error", "msg": "TypeError occurred: check values of fields"}', 400
except KeyError as e:
return '{"status": "error", "msg": "field %s have not been found, but is required"}' % str(e), 400
if new_prod.rating > 8.0:
new_prod.featured = True
for cat in categories:
cat.products.append(new_prod)
db.session.add(cat)
db.session.commit()
return jsonify({"status": "ok", "msg": "product received"})
@products_blueprint.route('/update_product', methods=['PUT'])
def update_product():
data = request.get_data().decode('utf-8')
payload = json.loads(data)
datetime_format = '%Y-%m-%d %H:%M:%S'
product = Product.query.filter(Product.id == payload['id'])
if product:
if 'name' in payload.keys():
product.update({'name': payload['name']})
if 'featured' in payload.keys():
product.update({'featured': bool(payload['featured'])})
if 'rating' in payload.keys():
product.update({'rating': float(payload['rating'])})
if product.rating > 8.0:
product.featured = True
if 'items_in_stock' in payload.keys():
product.update({'items_in_stock': int(payload['items_in_stock'])})
if 'receipt_date' in payload.keys():
product.update({'receipt_date': datetime.strptime(payload['receipt_date'], datetime_format)})
if 'brand' in payload.keys():
product.update({'brand': int(payload['brand'])})
if 'categories' in payload.keys():
categories = create_or_get_categories(payload)
db.session.query(products_categories).filter(
products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False)
product_obj = product.all()[0]
for cat in categories:
cat.products.append(product_obj)
db.session.add(cat)
if 'expiration_date' in payload.keys():
product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)})
db.session.commit()
return jsonify({"status": "ok", "msg": "product updated"})
else:
return '{"status": "error", "msg": "no product found with given id"}', 404
@products_blueprint.route('/delete_product', methods=['DELETE'])
def delete_product():
data = request.get_data().decode('utf-8')
p = json.loads(data)
products_result = Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False)
products_categories_result = db.session.query(products_categories).filter(
products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False)
db.session.commit()
if products_result == 1:
return jsonify({"status": "ok",
"msg": "product deleted, also %d product_categories relations deleted"
% products_categories_result})
else:
return jsonify({"status": "warning", "msg": "%d products deleted, also %d product_categories relations deleted"
% (products_result, products_categories_result)})
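# --- Editor's usage sketch (addition, not part of the original file). Assuming an
# application factory that registers products_blueprint, the endpoints can be
# exercised with Flask's test client; the field values below are illustrative.
#
# client = app.test_client()
# resp = client.post('/create_product', data=json.dumps({
#     "name": "Milk", "rating": 8.5, "brand_id": 1,
#     "items_in_stock": 10, "categories": ["dairy"]
# }))
# assert resp.status_code == 200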
| 45.580645
| 120
| 0.608457
| 648
| 5,652
| 5.169753
| 0.205247
| 0.029552
| 0.042687
| 0.041791
| 0.498806
| 0.408358
| 0.304179
| 0.304179
| 0.241791
| 0.152836
| 0
| 0.005947
| 0.256193
| 5,652
| 123
| 121
| 45.95122
| 0.790913
| 0.021231
| 0
| 0.171717
| 0
| 0.010101
| 0.178087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050505
| false
| 0
| 0.050505
| 0.010101
| 0.20202
| 0.060606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e008c8c892e467ea561589969c08eaa2c9b808db
| 1,701
|
py
|
Python
|
util/config/validators/test/test_validate_bitbucket_trigger.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 2,027
|
2019-11-12T18:05:48.000Z
|
2022-03-31T22:25:04.000Z
|
util/config/validators/test/test_validate_bitbucket_trigger.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 496
|
2019-11-12T18:13:37.000Z
|
2022-03-31T10:43:45.000Z
|
util/config/validators/test/test_validate_bitbucket_trigger.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 249
|
2019-11-12T18:02:27.000Z
|
2022-03-22T12:19:19.000Z
|
import pytest
from httmock import urlmatch, HTTMock
from util.config import URLSchemeAndHostname
from util.config.validator import ValidatorContext
from util.config.validators import ConfigValidationException
from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator
from test.fixtures import *
@pytest.mark.parametrize(
"unvalidated_config",
[
(ValidatorContext({})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_KEY": "foo"}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_SECRET": "foo"}})),
],
)
def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app):
validator = BitbucketTriggerValidator()
with pytest.raises(ConfigValidationException):
validator.validate(unvalidated_config)
def test_validate_bitbucket_trigger(app):
url_hit = [False]
@urlmatch(netloc=r"bitbucket.org")
def handler(url, request):
url_hit[0] = True
return {
"status_code": 200,
"content": "oauth_token=foo&oauth_token_secret=bar",
}
with HTTMock(handler):
validator = BitbucketTriggerValidator()
url_scheme_and_hostname = URLSchemeAndHostname("http", "localhost:5000")
unvalidated_config = ValidatorContext(
{
"BITBUCKET_TRIGGER_CONFIG": {
"CONSUMER_KEY": "foo",
"CONSUMER_SECRET": "bar",
},
},
url_scheme_and_hostname=url_scheme_and_hostname,
)
validator.validate(unvalidated_config)
assert url_hit[0]
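# Editor's note (addition, not part of the original file): the suite is driven by
# pytest, e.g. `pytest util/config/validators/test/test_validate_bitbucket_trigger.py`;
# the `app` fixture comes from test.fixtures, imported with * above.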
| 29.842105
| 87
| 0.671958
| 156
| 1,701
| 7.051282
| 0.352564
| 0.101818
| 0.1
| 0.138182
| 0.147273
| 0.105455
| 0.105455
| 0.105455
| 0
| 0
| 0
| 0.006855
| 0.228101
| 1,701
| 56
| 88
| 30.375
| 0.830922
| 0
| 0
| 0.093023
| 0
| 0
| 0.156966
| 0.078777
| 0
| 0
| 0
| 0
| 0.023256
| 1
| 0.069767
| false
| 0
| 0.162791
| 0
| 0.255814
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e008cc40a9e990beff8a7a594350250e113f3691
| 2,414
|
py
|
Python
|
Refraction.py
|
silkoch42/Geometric-Optics-from-QM
|
baf41b54c37835b527d5b98cb480d68bc2ff68c3
|
[
"MIT"
] | null | null | null |
Refraction.py
|
silkoch42/Geometric-Optics-from-QM
|
baf41b54c37835b527d5b98cb480d68bc2ff68c3
|
[
"MIT"
] | null | null | null |
Refraction.py
|
silkoch42/Geometric-Optics-from-QM
|
baf41b54c37835b527d5b98cb480d68bc2ff68c3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 16:51:16 2019
@author: Silvan
"""
import numpy as np
import scipy.integrate  # quad lives in the integrate submodule; plain `import scipy` is not enough
import matplotlib.pyplot as plt
k=1000
n1=2.0
n2=1.0
alpha=np.pi/6.0
beta=np.arcsin(n2/n1*np.sin(alpha))
ya=1.0
xa=-ya*np.tan(alpha)
yb=-1.0
xb=-yb*np.tan(beta)
def s(x):
return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2)
def kernel(xa,xb):
return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xb**2+1)**(3/2.0)  # fixed: the second term used xa twice, leaving xb unused
def K(R):
L=1000 #Maximum Number of subdivisions for integral calculations
eps=0.01
N=50
x,dx=np.linspace(0.01,R,N,retstep=True)
real=np.empty(N)
imag=np.empty(N)
real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0]
imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0]
for i in range(1,N):
r1=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0]
r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0]
real[i]=real[i-1]+r1+r2
i1=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0]
i2=scipy.integrate.quad(lambda x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0]
imag[i]=imag[i-1]+i1+i2
return np.sqrt(real**2+imag**2),x,real,imag
K2,x,r,i=K(3)
M=np.mean(K2[25:])
plt.plot(x,K2/M,label=r'$|\int_{-R}^{R}e^{i k s(x)}dx|^2$')
#plt.errorbar(x,K2/M,0.1*K2/M)
plt.xlabel(r'Integration range $R$')
plt.ylabel('Detection probabilty')
plt.legend(loc='best')
plt.text(2.4,0.2,r'$k=1000$')
#plt.text(1.1,0.5,r'$|\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20)
plt.savefig('refraction_v3',dpi=200)
plt.show()
#N=20
#
#dx=np.linspace(0,10,N)
#
#P=np.ones(N)
#
#for i in range(N):
# print(i+1)
# P[i]=trans_amp(dx[i])
#
#
#plt.figure(1)
#plt.plot(dx,P/np.mean(P[20:]))
#plt.text(4.0,0.5,r'$|\int_{-\Delta x}^{\Delta x} e^{ik s(x)}dx$|',fontsize=20)
#plt.ylabel('Transition Amplitude')
#plt.xlabel(r'Integration Interval $ \Delta x$')
##plt.axis([0,10,0,1.1])
#plt.legend(loc='best')
##plt.savefig('refraction',dpi=200)
#plt.show()
#x=np.linspace(-5,5,100)
#
#plt.figure(2)
#plt.plot(x,s(x))
#plt.show()
#
#d=np.linspace(0,5,100)
#xa=-d/2
#xb=d/2
#plt.figure(3)
#plt.plot(d,kernel(xa,xb)**2)
#plt.show()
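# Editor's note (addition, not part of the original file): K(R) accumulates
# |int_{-R}^{R} exp(i*k*s(x)) dx| over nested intervals, so by the
# stationary-phase argument the plotted detection probability saturates once R
# covers the stationary point of the optical path s(x).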
| 24.383838
| 95
| 0.583264
| 510
| 2,414
| 2.75098
| 0.239216
| 0.015681
| 0.017106
| 0.102637
| 0.307912
| 0.280827
| 0.275125
| 0.273699
| 0.273699
| 0.273699
| 0
| 0.076923
| 0.154515
| 2,414
| 99
| 96
| 24.383838
| 0.610485
| 0.314002
| 0
| 0
| 0
| 0
| 0.06539
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0.047619
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e00b9ee8e43ae71af00a3fe383bedc3df745f04d
| 7,574
|
py
|
Python
|
train.py
|
vnbot2/BigGAN-PyTorch
|
1725269d52e05fbd4d06dac64aa4906a8ae7a760
|
[
"MIT"
] | null | null | null |
train.py
|
vnbot2/BigGAN-PyTorch
|
1725269d52e05fbd4d06dac64aa4906a8ae7a760
|
[
"MIT"
] | null | null | null |
train.py
|
vnbot2/BigGAN-PyTorch
|
1725269d52e05fbd4d06dac64aa4906a8ae7a760
|
[
"MIT"
] | null | null | null |
""" BigGAN: The Authorized Unofficial PyTorch release
Code by A. Brock and A. Andonian
This code is an unofficial reimplementation of
"Large-Scale GAN Training for High Fidelity Natural Image Synthesis,"
by A. Brock, J. Donahue, and K. Simonyan (arXiv 1809.11096).
Let's go.
"""
import datetime
import time
import torch
import dataset
import BigGAN
import train_fns
import utils
from common import *
# IMG_SIZE = 64
# IMG_SIZE_2 = IMG_SIZE * 2
def run(config):
# Update the config dict as necessary
# This is for convenience, to add settings derived from the user-specified
# configuration into the config-dict (e.g. inferring the number of classes
# and size of the images from the dataset, passing in a pytorch object
# for the activation specified as a string)
config['resolution'] = IMG_SIZE
config['n_classes'] = 1
config['G_activation'] = utils.activation_dict[config['G_nl']]
config['D_activation'] = utils.activation_dict[config['D_nl']]
# By default, skip init if resuming training.
if config['resume']:
print('Skipping initialization for training resumption...')
config['skip_init'] = True
config = utils.update_config_roots(config)
device = 'cuda'
# Seed RNG
utils.seed_rng(config['seed'])
# Prepare root folders if necessary
utils.prepare_root(config)
# Setup cudnn.benchmark for free speed
torch.backends.cudnn.benchmark = True
experiment_name = (config['experiment_name'] if config['experiment_name']
else 'generative_dog_images')
print('Experiment name is %s' % experiment_name)
G = BigGAN.Generator(**config).to(device)
D = BigGAN.Discriminator(**config).to(device)
# if config['parallel']:
G = nn.DataParallel(G)
D = nn.DataParallel(D)
# If using EMA, prepare it
if config['ema']:
print('Preparing EMA for G with decay of {}'.format(
config['ema_decay']))
G_ema = BigGAN.Generator(**{**config, 'skip_init': True,
'no_optim': True}).to(device)
G_ema = nn.DataParallel(G_ema)
ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start'])
else:
G_ema, ema = None, None
GD = BigGAN.G_D(G, D)
print(G)
print(D)
print('Number of params in G: {} D: {}'.format(
*[sum([p.data.nelement() for p in net.parameters()]) for net in [G, D]]))
# Prepare state dict, which holds things like epoch # and itr #
state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'config': config}
# If loading from a pre-trained model, load weights
if config['resume']:
print('Loading weights...')
utils.load_weights(G, D, state_dict,
config['weights_root'], experiment_name,
config['load_weights'] if config['load_weights'] else None,
G_ema if config['ema'] else None)
# Prepare data; the Discriminator's batch size is all that needs to be passed
# to the dataloader, as G doesn't require dataloading.
# Note that at every loader iteration we pass in enough data to complete
# a full D iteration (regardless of number of D steps and accumulations)
D_batch_size = (config['batch_size'] *
config['num_D_steps'] * config['num_D_accumulations'])
loaders = dataset.get_data_loaders(
data_root=config['data_root'],
label_root=config['label_root'],
batch_size=D_batch_size,
num_workers=config['num_workers'],
shuffle=config['shuffle'],
pin_memory=config['pin_memory'],
drop_last=True,
load_in_mem=config['load_in_mem'],
mask_out=config['mask_out']
)
# Prepare noise and randomly sampled label arrays
# Allow for different batch sizes in G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
num_samples = config['num_fixed_samples']
z_, y_ = utils.prepare_z_y(
num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
# Prepare a fixed z & y to see individual sample evolution throghout training
fixed_z, fixed_y = utils.prepare_z_y(
num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
# Loaders are loaded, prepare the training function
train = train_fns.create_train_fn(
G, D, GD, z_, y_, ema, state_dict, config)
print('Beginning training at epoch %d...' % state_dict['epoch'])
start_time = time.perf_counter()
loader = loaders[0]
total_iters = config['num_epochs'] * len(loader)
# Train for specified number of epochs, although we mostly track G iterations.
pbar = tqdm(total=total_iters)
for _ in range(state_dict['itr']):
pbar.update()
timer = mmcv.Timer()
timer.start()
start_itr = state_dict['itr']
for epoch in range(state_dict['epoch'], config['num_epochs']):
for i, data in enumerate(loader):
x, y = data['img'], data['label']
# Increment the iteration counter
state_dict['itr'] += 1
# Make sure G and D are in training mode, just in case they got set to eval
# For D, which typically doesn't have BN, this shouldn't matter much.
G.train()
D.train()
if config['ema']:
G_ema.train()
x, y = x.to(device), y.to(device)
metrics = train(x, y)
if not (state_dict['itr'] % config['log_interval']):
curr_time = timer.since_start()
curr_time_str = datetime.datetime.fromtimestamp(
curr_time).strftime('%H:%M:%S')
# ETA = remaining distance / (distance covered / time elapsed)
eta = (
total_iters - state_dict['itr']) // ((state_dict['itr']-start_itr) / (curr_time+1))
eta_str = datetime.datetime.fromtimestamp(
eta).strftime('%H:%M:%S')
log = "[{}] [{}] [{} / {}] Ep {}, ".format(
curr_time_str, eta_str, state_dict['itr'], total_iters, epoch)
log += ', '.join(['%s : %+4.3f' % (key, metrics[key])
for key in metrics])
pbar.set_description(log)
# print(log)
# Save weights and copies as configured at specified interval
if not (state_dict['itr'] % config['sample_every']):
if config['G_eval_mode']:
# print('Switching G to eval mode...')
G.eval()
train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name, save_weight=False)
if not (state_dict['itr'] % config['save_every']):
if config['G_eval_mode']:
# print('Switching G to eval mode...')
G.eval()
train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name, save_weight=True)
pbar.update()
# Increment epoch counter at end of epoch
state_dict['epoch'] += 1
def main():
# parse command line and run
parser = utils.prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main()
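# Editor's usage sketch (addition, not part of the original file): the config dict
# comes from utils.prepare_parser(), so training is launched from the command
# line; the flag names below are assumptions modelled on the config keys used
# above (batch_size, num_epochs, ema, resume):
#   python train.py --batch_size 32 --num_epochs 100 --ema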
| 40.287234
| 103
| 0.597571
| 992
| 7,574
| 4.382056
| 0.288306
| 0.037267
| 0.027605
| 0.010352
| 0.139176
| 0.112031
| 0.096158
| 0.096158
| 0.096158
| 0.096158
| 0
| 0.005733
| 0.28611
| 7,574
| 187
| 104
| 40.502674
| 0.798225
| 0.252046
| 0
| 0.112903
| 0
| 0
| 0.142092
| 0.003749
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0
| 0.064516
| 0
| 0.080645
| 0.072581
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e00c71d6078595059b1d0af82650622e80499174
| 1,693
|
py
|
Python
|
geocamUtil/tempfiles.py
|
geocam/geocamUtilWeb
|
b64fc063c64b4b0baa140db4c126f2ff980756ab
|
[
"NASA-1.3"
] | 4
|
2017-03-03T16:24:24.000Z
|
2018-06-24T05:50:40.000Z
|
geocamUtil/tempfiles.py
|
geocam/geocamUtilWeb
|
b64fc063c64b4b0baa140db4c126f2ff980756ab
|
[
"NASA-1.3"
] | 1
|
2021-09-29T17:17:30.000Z
|
2021-09-29T17:17:30.000Z
|
geocamUtil/tempfiles.py
|
geocam/geocamUtilWeb
|
b64fc063c64b4b0baa140db4c126f2ff980756ab
|
[
"NASA-1.3"
] | 1
|
2017-12-19T20:45:53.000Z
|
2017-12-19T20:45:53.000Z
|
# __BEGIN_LICENSE__
#Copyright (c) 2015, United States Government, as represented by the
#Administrator of the National Aeronautics and Space Administration.
#All rights reserved.
# __END_LICENSE__
import os
import time
import random
import shutil
from glob import glob
import traceback
import sys
from geocamUtil import FileUtil
from django.conf import settings
def getTempName(prefix, suffix=''):
return '%s/%s-%s-%s%s' % (settings.TMP_DIR,
prefix,
time.strftime('%Y-%m-%d-%H%M'),
'%04x' % random.getrandbits(16),
suffix)
def deleteStaleFiles():
files = glob('%s/*' % settings.TMP_DIR)
now = time.time()
for f in files:
if (now - os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and
not f.endswith('/README.txt')):
try:
os.unlink(f)
except OSError:
traceback.print_exc()
print >> sys.stderr, '[tempfiles.deleteStaleFiles: could not unlink %s]' % f
def makeTempDir(prefix):
d = getTempName(prefix)
if not os.path.exists(settings.TMP_DIR):
FileUtil.mkdirP(settings.TMP_DIR)
os.system('chmod go+rw %s' % settings.TMP_DIR)
deleteStaleFiles()
FileUtil.mkdirP(d)
return d
def initZipDir(prefix):
return makeTempDir(prefix)
def finishZipDir(zipDir):
zipFile = '%s.zip' % zipDir
oldDir = os.getcwd()
os.chdir(os.path.dirname(settings.TMP_DIR))
os.system('zip -r %s %s' % (zipFile, os.path.basename(zipDir)))
os.chdir(oldDir)
shutil.rmtree(zipDir)
return zipFile
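# --- Editor's usage sketch (addition, not part of the original file). The zip
# helpers are intended to be used as a pair, and settings.TMP_DIR must be
# configured in Django for any of this module to work.
#
# zip_dir = initZipDir('export')
# ...                              # write files into zip_dir
# zip_file = finishZipDir(zip_dir) # '<zip_dir>.zip'; zip_dir itself is removed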
| 27.306452
| 93
| 0.617247
| 208
| 1,693
| 4.908654
| 0.490385
| 0.064643
| 0.082272
| 0.044074
| 0.043095
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006478
| 0.270526
| 1,693
| 61
| 94
| 27.754098
| 0.820243
| 0.111636
| 0
| 0
| 0
| 0
| 0.084112
| 0.018692
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0.204545
| 0.045455
| 0.409091
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e00d7dd12724a0363ee40d8c349e7cccfb71d6f4
| 5,752
|
py
|
Python
|
Ex1:Tests/ex2.py
|
Lludion/Exercises-SE
|
4d5b2b4f2989a3e2c7891ba2b766394dbfb43973
|
[
"MIT"
] | null | null | null |
Ex1:Tests/ex2.py
|
Lludion/Exercises-SE
|
4d5b2b4f2989a3e2c7891ba2b766394dbfb43973
|
[
"MIT"
] | null | null | null |
Ex1:Tests/ex2.py
|
Lludion/Exercises-SE
|
4d5b2b4f2989a3e2c7891ba2b766394dbfb43973
|
[
"MIT"
] | null | null | null |
# This file contains (at least) five errors.
# Instructions:
# - test until you reach 100% coverage;
# - fix the bugs;
# - send the diff or the git repository by email.
import hypothesis
from hypothesis import given, settings
from hypothesis.strategies import integers, lists
class BinHeap:
# binary heap of integers
def __init__(self):
# initialize an integer binary heap containing the single element 0
self.heapList = [0]
self.currentSize = 1  # size of the heapList list (invariant)
def percUp(self,i):
# upward percolation until the root is reached or the parent is no larger
while i // 2 > 0 and self.heapList[i] < self.heapList[i // 2]:
tmp = self.heapList[i // 2]
self.heapList[i // 2] = self.heapList[i]
self.heapList[i] = tmp
i //= 2
def insert(self,k):
#inserting a new value into the heap
self.heapList.append(k)
self.percUp(self.currentSize)
self.currentSize = self.currentSize + 1
def percDown(self,i):
while (i * 2) < self.currentSize:#while I have a child
mc = self.minChild(i)#mc is the index of the smallest
if self.heapList[i] > self.heapList[mc]:
tmp = self.heapList[i]
self.heapList[i] = self.heapList[mc]
self.heapList[mc] = tmp
i = mc
def minChild(self,i):
if i * 2 >= self.currentSize or i == 0:
print("No Child. None is returned.")
return
if i * 2 + 1 >= self.currentSize:
return i * 2
else:
if self.heapList[i*2] < self.heapList[i*2+1]:
return i * 2
else:
return i * 2 + 1
def delMin(self):
try:
rval = self.heapList[1]
except IndexError:
print("Empty heap. Nothing is changed. None is returned.")
return
self.currentSize = self.currentSize - 1
self.heapList[1] = self.heapList[self.currentSize]
self.heapList.pop()
self.percDown(1)
return rval
def buildHeap(self,alist):
#creates a whole heap from a list, by percolating all its elements
i = 1
self.currentSize = len(alist) + 1
self.heapList = [0] + alist  # fixed: removed the [:] from the original
while (i < self.currentSize):
self.percUp(i)
i += 1
def assert_isheaplist(x,val,lon,HL):
assert ((x * 2 + 1 > lon) or (x * 2 + 1 == lon and HL[2*x] >= val) or (HL[2*x] >= val and HL[2*x+1] >= val))
def assert_goodheap(tau,lon):
for x in range(1,lon):
assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList)
def test_init():
tau = BinHeap()
assert tau.heapList == [0]
assert tau.currentSize == 1
@given(integers())
@settings(max_examples=100)
def test_percup(integer):
gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3]
tau = BinHeap()
tau.currentsize = 16
tau.heapList = gamma[:]
tau.percUp(15)
assert tau.heapList == gamma[:]
tau.heapList[15] = 2
tau.percUp(15)
print(tau.heapList)
assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3]
assert tau.currentsize == 16
tau.heapList.append(8)
tau.currentsize = 17
tau.percUp(16)
assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10]
tau.heapList.append(integer)
tau.currentsize = 18
tau.percUp(17)
assert tau.heapList[17] >= tau.heapList[8]
assert tau.heapList[8] >= tau.heapList[4]
@given(lists(elements=integers()))
@settings(max_examples=1000)
def test_build(L):
tau = BinHeap()
tau.buildHeap(L)
assert tau.currentSize == len(L) + 1
assert sorted(tau.heapList) == sorted(L+[0])
assert_goodheap(tau,len(L)+1)
#for x in range(1,len(L) + 1):
# assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList)
@given(lists(elements=integers()),integers())
@settings(max_examples=1000)
def test_insert(L,i):
tau = BinHeap()
tau.buildHeap(L)
tau.insert(i)
assert_goodheap(tau,len(L)+1)
@given(lists(elements=integers()),integers())
@settings(max_examples=100)
def test_percDown(L,i):
tau = BinHeap()
L += [10]
tau.buildHeap(L)
tau.heapList[1] = i
tau.percDown(1)
for x in range(1,len(L) + 1):
for _ in range(len(L)):
tau.percDown(x)
#then we test that we got a well-ordered heap
assert_goodheap(tau,len(L)+1)
@given(lists(elements=integers()))
@settings(max_examples=400,deadline=None)
def test_delmin(L):
L += [10]
tau = BinHeap()
assert tau.delMin() is None
tau.buildHeap(L)
#print(L)
#print("sorted",sorted(L),"\n")
#print("TAU ", tau.heapList,"\n")
assert tau.delMin() == min(L)
@given(lists(elements=integers()),integers())
@settings(max_examples=400)
def test_minChild(L,i):
tau = BinHeap()
assert tau.minChild(abs(i)) is None
tau.buildHeap(2*L+[0,1])
assert tau.minChild(len(L)+1) is not None
@given(lists(elements=integers()),lists(elements=integers()))
@settings(max_examples=400,deadline=None)
def test_general(L,K):
tau = BinHeap()
tau.buildHeap(L)  # heap built from L
for k in K: tau.insert(k)  # insert the elements of K
assert_goodheap(tau,tau.currentSize)
x = []
while tau.currentSize > 1: x.append(tau.delMin())  # remove all the elements
assert x == sorted(L + K)  # check that delMin always yields the minimum
assert tau.delMin() is None
x = []
tau.buildHeap(K)
for l in L:  # test whether a sequence of insertions/deletions maintains the heap structure
tau.delMin()
tau.insert(l)
assert_goodheap(tau,tau.currentSize)
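# Editor's note (addition, not part of the original file): run the suite with
# `pytest ex2.py` (the hypothesis package drives the randomized inputs). Outside
# the tests, the heap is used as:
# h = BinHeap(); h.buildHeap([5, 3, 8, 1]); h.delMin()  # -> 1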
| 31.26087
| 112
| 0.604312
| 839
| 5,752
| 4.108462
| 0.210965
| 0.073107
| 0.041485
| 0.05483
| 0.342617
| 0.254424
| 0.217581
| 0.16188
| 0.099507
| 0.089063
| 0
| 0.042623
| 0.25765
| 5,752
| 184
| 113
| 31.26087
| 0.764637
| 0.160466
| 0
| 0.296552
| 0
| 0
| 0.015823
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 1
| 0.117241
| false
| 0
| 0.02069
| 0
| 0.186207
| 0.02069
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e00dbb3c20046835e182d01718caf34d09944176
| 22,455
|
py
|
Python
|
python/snewpy/snowglobes.py
|
svalder/snewpy
|
5723189ae3dce3506f2fab056bbef24c9ab1a31f
|
[
"BSD-3-Clause"
] | null | null | null |
python/snewpy/snowglobes.py
|
svalder/snewpy
|
5723189ae3dce3506f2fab056bbef24c9ab1a31f
|
[
"BSD-3-Clause"
] | null | null | null |
python/snewpy/snowglobes.py
|
svalder/snewpy
|
5723189ae3dce3506f2fab056bbef24c9ab1a31f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""The ``snewpy.snowglobes`` module contains functions for interacting with SNOwGLoBES.
`SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected
event rates from a given input supernova neutrino flux. It supports many
different neutrino detectors, detector materials and interaction channels.
There are three basic steps to using SNOwGLoBES from SNEWPY:
* **Generating input files for SNOwGLoBES:**
There are two ways to do this, either generate a time series or a fluence file. This is done taking as input the supernova simulation model.
The first will evaluate the neutrino flux at each time step, the latter will compute the integrated neutrino flux (fluence) in the time bin.
The result is a compressed .tar file containing all individual input files.
* **Running SNOwGLoBES:**
This step convolves the fluence generated in the previous step with the cross-sections for the interaction channels happening in various detectors supported by SNOwGLoBES.
It takes into account the effective mass of the detector as well as a smearing matrix describing the energy-dependent detection efficiency.
The output gives the number of events detected as a function of energy for each interaction channel, integrated in a given time window (or time bin), or in a snapshot in time.
* **Collating SNOwGLoBES outputs:**
This step puts together all the interaction channels and time bins evaluated by SNOwGLoBES in a single file (for each detector and for each time bin).
The output tables allow to build the detected neutrino energy spectrum and neutrino time distribution, for each reaction channel or the sum of them.
"""
import io
import logging
import os
import re
import tarfile
from pathlib import Path
from tempfile import TemporaryDirectory
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from tqdm.auto import tqdm
import snewpy.models
from snewpy.flavor_transformation import *
from snewpy.neutrino import Flavor, MassHierarchy
from snewpy.snowglobes_interface import SNOwGLoBES
logger = logging.getLogger(__name__)
def generate_time_series(model_path, model_type, transformation_type, d, output_filename=None, ntbins=30, deltat=None):
"""Generate time series files in SNOwGLoBES format.
This version will subsample the times in a supernova model, produce energy
tables expected by SNOwGLoBES, and compress the output into a tarfile.
Parameters
----------
model_path : str
Input file containing neutrino flux information from supernova model.
model_type : str
Format of input file. Matches the name of the corresponding class in :py:mod:`snewpy.models`.
transformation_type : str
Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values.
d : int or float
Distance to supernova in kpc.
output_filename : str or None
Name of output file. If ``None``, will be based on input file name.
ntbins : int
Number of time slices. Will be ignored if ``deltat`` is also given.
deltat : astropy.Quantity or None
Length of time slices.
Returns
-------
str
Path of compressed .tar file with neutrino flux data.
"""
model_class = getattr(snewpy.models.ccsn, model_type)
# Choose flavor transformation. Use dict to associate the transformation name with its class.
flavor_transformation_dict = {
'NoTransformation': NoTransformation(),
'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL),
'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED),
'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL),
'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED),
'TwoFlavorDecoherence': TwoFlavorDecoherence(),
'ThreeFlavorDecoherence': ThreeFlavorDecoherence(),
'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL),
'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED),
}
flavor_transformation = flavor_transformation_dict[transformation_type]
model_dir, model_file = os.path.split(os.path.abspath(model_path))
snmodel = model_class(model_path)
# Subsample the model time. Default to 30 time slices.
tmin = snmodel.get_time()[0]
tmax = snmodel.get_time()[-1]
if deltat is not None:
dt = deltat
ntbins = int((tmax-tmin)/dt)
else:
dt = (tmax - tmin) / (ntbins+1)
tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s
times = 0.5*(tedges[1:] + tedges[:-1])
# Generate output.
if output_filename is not None:
tfname = output_filename + 'kpc.tar.bz2'
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) + 'kpc.tar.bz2'
with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf:
#creates file in tar archive that gives information on parameters
output = '\n'.join(map(str, transformation_type)).encode('ascii')
tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output))
MeV = 1.60218e-6 * u.erg
energy = np.linspace(0, 100, 501) * MeV # 1MeV
# Loop over sampled times.
for i, t in enumerate(times):
osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation)
osc_fluence = {}
table = []
table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per cm^2'.format(t, dt))
table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau')
# Generate energy + number flux table.
for j, E in enumerate(energy):
for flavor in Flavor:
osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2)
s = '{:17.8E}'.format(E/(1e3 * MeV))
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
table.append(s)
logging.debug(s)
# Encode energy/flux table and output to file in tar archive.
output = '\n'.join(table).encode('ascii')
extension = ".dat"
model_file_root, _ = os.path.splitext(model_file)
filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \
'.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension)
info = tarfile.TarInfo(name=filename)
info.size = len(output)
tf.addfile(info, io.BytesIO(output))
return os.path.join(model_dir, tfname)
def generate_fluence(model_path, model_type, transformation_type, d, output_filename=None, tstart=None, tend=None):
"""Generate fluence files in SNOwGLoBES format.
This version will subsample the times in a supernova model, produce energy
tables expected by SNOwGLoBES, and compress the output into a tarfile.
Parameters
----------
model_path : str
Input file containing neutrino flux information from supernova model.
model_type : str
Format of input file. Matches the name of the corresponding class in :py:mod:`snewpy.models`.
transformation_type : str
Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values.
d : int or float
Distance to supernova in kpc.
output_filename : str or None
Name of output file. If ``None``, will be based on input file name.
tstart : astropy.Quantity or None
Start of time interval to integrate over, or list of start times of the time series bins.
tend : astropy.Quantity or None
End of time interval to integrate over, or list of end times of the time series bins.
Returns
-------
str
Path of compressed .tar file with neutrino flux data.
"""
model_class = getattr(snewpy.models.ccsn, model_type)
# Choose flavor transformation. Use dict to associate the transformation name with its class.
flavor_transformation_dict = {
'NoTransformation': NoTransformation(),
'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL),
'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED),
'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL),
'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED),
'TwoFlavorDecoherence': TwoFlavorDecoherence(),
'ThreeFlavorDecoherence': ThreeFlavorDecoherence(),
'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL),
'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED),
}
flavor_transformation = flavor_transformation_dict[transformation_type]
model_dir, model_file = os.path.split(os.path.abspath(model_path))
snmodel = model_class(model_path)
#set the timings up
#default if inputs are None: full time window of the model
if tstart is None:
tstart = snmodel.get_time()[0]
tend = snmodel.get_time()[-1]
try:
if len(tstart/u.s) > 0:
t0 = tstart[0]
t1 = tend[-1]
nbin = len(tstart/u.s)
except TypeError:  # scalar tstart/tend: treat the window as a single time bin
t0 = tstart
t1 = tend
nbin = 1
times = 0.5*(tstart + tend)
model_times = snmodel.get_time()
model_tstart = model_times*1.0
model_tend = model_times*1.0
model_tstart[0] = model_times[0]
for i in range(1, len(model_times), 1):
model_tstart[i] = 0.5*(model_times[i]+model_times[i-1])
model_tend[i-1] = model_tstart[i]
model_tend[len(model_times)-1] = model_times[-1]
if nbin > 1:
starting_index = np.zeros(len(times), dtype=np.int64)
ending_index = np.zeros(len(times), dtype=np.int64)
for i in range(len(tstart)):
starting_index[i] = next(j for j, t in enumerate(model_tend) if t > tstart[i])
ending_index[i] = next(j for j, t in enumerate(model_tend) if t >= tend[i])
else:
starting_index = [next(j for j, t in enumerate(model_tend) if t > tstart)]
ending_index = [next(j for j, t in enumerate(model_tend) if t >= tend)]
# Generate output.
if output_filename is not None:
tfname = output_filename+'.tar.bz2'
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) + 'kpc.tar.bz2'
with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf:
#creates file in tar archive that gives information on parameters
output = '\n'.join(map(str, transformation_type)).encode('ascii')
tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output))
MeV = 1.60218e-6 * u.erg
energy = np.linspace(0, 100, 501) * MeV
# Loop over sampled times.
for i in range(nbin):
if nbin > 1:
ta = tstart[i]
tb = tend[i]
t = times[i]
dt = tb-ta
else:
ta = tstart
tb = tend
t = times
dt = tb-ta
#first time bin of model in requested interval
osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation)
if dt < model_tend[starting_index[i]]-ta:
dt = dt
else:
for flavor in Flavor:
osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta)
#intermediate time bins of model in requested interval
for j in range(starting_index[i]+1, ending_index[i], 1):
temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation)
for flavor in Flavor:
osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j])
#last time bin of model in requested interval
temp_spectra = snmodel.get_transformed_spectra(
model_times[ending_index[i]], energy, flavor_transformation)
for flavor in Flavor:
osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]])
for flavor in Flavor:
osc_spectra[flavor] /= (tb-ta)
osc_fluence = {}
table = []
table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per cm^2'.format(t, dt))
table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau')
# Generate energy + number flux table.
for j, E in enumerate(energy):
for flavor in Flavor:
osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2)
s = '{:17.8E}'.format(E/(1e3 * MeV))
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
table.append(s)
logging.debug(s)
# Encode energy/flux table and output to file in tar archive.
output = '\n'.join(table).encode('ascii')
extension = ".dat"
if output_filename is not None:
if nbin > 1:
filename = output_filename+"_"+str(i)+extension
else:
filename = output_filename+extension
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \
'.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension)
info = tarfile.TarInfo(name=filename)
info.size = len(output)
tf.addfile(info, io.BytesIO(output))
return os.path.join(model_dir, tfname)
def simulate(SNOwGLoBESdir, tarball_path, detector_input="all", verbose=False):
"""Takes as input the neutrino flux files and configures and runs the supernova script inside SNOwGLoBES, which outputs calculated event rates expected for a given (set of) detector(s). These event rates are given as a function of the neutrino energy and time, for each interaction channel.
Parameters
----------
SNOwGLoBESdir : str
Path to directory where SNOwGLoBES is installed.
tarball_path : str
Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``.
detector_input : str
Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES.
verbose : bool
Whether to generate verbose output, e.g. for debugging.
"""
sng = SNOwGLoBES(SNOwGLoBESdir)
if detector_input == 'all':
detector_input = list(sng.detectors)
detector_input.remove('d2O')
elif isinstance(detector_input,str):
detector_input = [detector_input]
result = {}
#Extracts data from tarfile and sets up lists of paths and fluxfilenames for later use
with TemporaryDirectory(prefix='snowglobes') as tempdir:
with tarfile.open(tarball_path) as tar:
tar.extractall(tempdir)
flux_files = list(Path(tempdir).glob('*.dat'))
if len(detector_input)>0:
detector_input = tqdm(detector_input, desc='Detectors', leave=False)
for det in detector_input:
res=sng.run(flux_files, det)
result[det]=dict(zip((f.stem for f in flux_files),res))
# save result to file for re-use in collate()
cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'
logging.info(f'Saving simulation results to {cache_file}')
np.save(cache_file, result)
return result
re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\d*)_?(.*)')
def get_channel_label(c):
mapp = {'nc':'NeutralCurrent',
'ibd':'Inverse Beta Decay',
'e':r'${\nu}_x+e^-$'}
def gen_label(m):
flv,bar,Nuc,num,res = m.groups()
if flv!='e':
flv='\\'+flv
if bar:
bar='\\'+bar
s = f'${bar}{{\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res
return s
if c in mapp:
return mapp[c]
else:
return re_chan_label.sub(gen_label, c)
def collate(SNOwGLoBESdir, tarball_path, detector_input="all", skip_plots=False, verbose=False, remove_generated_files=True):
"""Collates SNOwGLoBES output files and generates plots or returns a data table.
Parameters
----------
SNOwGLoBESdir : str
Path to directory where SNOwGLoBES is installed.
tarball_path : str
Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``.
detector_input : str
Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES.
skip_plots: bool
If False, it gives as output the plot of the energy distribution for each time bin and for each interaction channel.
verbose : bool
Whether to generate verbose output, e.g. for debugging.
remove_generated_files: bool
Remove the output files from SNOwGLoBES, collated files, and .png's made for this snewpy run.
Returns
-------
dict
Dictionary of data tables: One table per time bin; each table contains in the first column the energy bins, in the remaining columns the number of events for each interaction channel in the detector.
"""
def aggregate_channels(table, **patterns):
#rearrange the table to have only channel column
levels = list(table.columns.names)
levels.remove('channel')
t = table.stack(levels)
for name,pattern in patterns.items():
#get channels which contain `like`
t_sel = t.filter(like=pattern)
#sum over them and save to a separate column
t_agg = t_sel.sum(axis='columns')
#drop processed channels
t.drop(t_sel.columns, axis='columns',inplace=True)
t[name]=t_agg #fill the column
#return table with the original levels order
t = t.unstack(levels)
t = t.reorder_levels(table.columns.names, axis=1)
return t
def do_plot(table, params):
#plotting the events from given table
flux,det,weighted,smeared = params
for c in table.columns:
if table[c].max() > 0.1:
plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1)
plt.xlim(right=0.10)
plt.ylim(bottom=0.10)
plt.yscale('log')
plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), loc='best', borderaxespad=0) # formats complete graph
smear_title = 'Interaction' if smeared=='unsmeared' else 'Detected'
plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title} Events')
if smeared=='smeared':
plt.xlabel('Detected Energy (GeV)')
plt.ylabel('Events')
else:
plt.xlabel('Neutrino Energy (GeV)')
plt.ylabel('Interaction Events')
#read the results from storage
cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'
logging.info(f'Reading tables from {cache_file}')
tables = np.load(cache_file, allow_pickle=True).tolist()
#This output is similar to what produced by:
#tables = simulate(SNOwGLoBESdir, tarball_path,detector_input)
#dict for old-style results, for backward compatibiity
results = {}
#save collated files:
with TemporaryDirectory(prefix='snowglobes') as tempdir:
tempdir = Path(tempdir)
for det in tables:
results[det] = {}
for flux,t in tables[det].items():
t = aggregate_channels(t,nc='nc_',e='_e')
for w in ['weighted','unweighted']:
for s in ['smeared','unsmeared']:
table = t[w][s]
filename_base = f'{flux}_{det}_events_{s}_{w}'
filename = tempdir/f'Collated_{filename_base}.dat'
#save results to text files
with open(filename,'w') as f:
f.write(table.to_string(float_format='%23.15g'))
#format the results for the output
header = 'Energy '+' '.join(list(table.columns))
data = table.to_numpy().T
index = table.index.to_numpy()
data = np.concatenate([[index],data])
results[filename.name] = {'header':header,'data':data}
#optionally plot the results
if skip_plots is False:
plt.figure(dpi=300)
do_plot(table,(flux,det,w,s))
filename = tempdir/f'{filename_base}_log_plot.png'
plt.savefig(filename, dpi=300, bbox_inches='tight')
#Make a tarfile with the condensed data files and plots
output_name = Path(tarball_path).stem
output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed'
output_path = Path(tarball_path).parent/(output_name+'.tar.gz')
with tarfile.open(output_path, "w:gz") as tar:
for file in tempdir.iterdir():
tar.add(file,arcname=output_name+'/'+file.name)
logging.info(f'Created archive: {output_path}')
return results
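if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. The paths below
    # are hypothetical placeholders; this assumes the companion
    # generate_fluence()/simulate() helpers defined earlier in this module
    # have produced the fluence tarball and the cached .npy results.
    SNOwGLoBESdir = '/path/to/snowglobes'   # hypothetical install location
    tarball = '/path/to/fluence.tar.bz2'    # e.g. from generate_fluence()
    simulate(SNOwGLoBESdir, tarball, detector_input='icecube')
    tables = collate(SNOwGLoBESdir, tarball, detector_input='icecube',
                     skip_plots=True)
    print(list(tables)[:3])  # e.g. detector keys / collated file names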
| 46.298969
| 553
| 0.631485
| 2,934
| 22,455
| 4.724608
| 0.178937
| 0.024527
| 0.016159
| 0.01111
| 0.524816
| 0.513851
| 0.479873
| 0.467104
| 0.452821
| 0.447482
| 0
| 0.013161
| 0.258962
| 22,455
| 484
| 554
| 46.394628
| 0.819892
| 0.311378
| 0
| 0.347826
| 0
| 0.01087
| 0.113608
| 0.023398
| 0.003623
| 0
| 0
| 0
| 0
| 1
| 0.028986
| false
| 0
| 0.057971
| 0
| 0.115942
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e00e074bf789711cc01d53bcaa030d52c4e69f5b
| 4,621
|
py
|
Python
|
rlcycle/dqn_base/loss.py
|
cyoon1729/Rlcycle
|
5c65b9dd61a6fd5d6dfe92f0b3e04bf309828569
|
[
"MIT"
] | 128
|
2020-06-29T01:40:36.000Z
|
2022-03-29T15:37:39.000Z
|
rlcycle/dqn_base/loss.py
|
cyoon1729/Rlcycle
|
5c65b9dd61a6fd5d6dfe92f0b3e04bf309828569
|
[
"MIT"
] | 8
|
2020-06-29T03:51:50.000Z
|
2020-07-22T23:55:47.000Z
|
rlcycle/dqn_base/loss.py
|
cyoon1729/Rlcycle
|
5c65b9dd61a6fd5d6dfe92f0b3e04bf309828569
|
[
"MIT"
] | 24
|
2020-07-02T06:03:03.000Z
|
2022-03-22T11:59:53.000Z
|
from typing import Tuple
from omegaconf import DictConfig
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlcycle.common.abstract.loss import Loss
class DQNLoss(Loss):
"""Compute double DQN loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
q_value = network.forward(states).gather(1, actions)  # Q(s, a) of the taken actions
with torch.no_grad():
    next_q = torch.max(target_network.forward(next_states), 1)[0].unsqueeze(1)
    n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
    target_q = rewards + (1 - dones) * n_step_gamma * next_q  # n-step TD target
element_wise_loss = F.smooth_l1_loss(
q_value, target_q.detach(), reduction="none"
)
return element_wise_loss
class QRLoss(Loss):
"""Compute quantile regression loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...],
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * next_z
distance = target_z - z_dists
# quantile Huber loss: |tau - 1{distance < 0}| * Huber(distance),
# as in quantile regression DQN (Dabney et al., 2017)
quantile_huber_loss = (
    network.tau - (distance.detach() < 0).float()
).abs() * self.huber_loss(distance)
element_wise_loss = torch.mean(quantile_huber_loss, dim=1, keepdim=True)
return element_wise_loss
@staticmethod
def huber_loss(x: torch.Tensor, k: float = 1.0):
return torch.where(x.abs() <= k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))
class CategoricalLoss(Loss):
"""Compute C51 loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
batch_size = states.size(0)
offset = (
torch.linspace(0, (batch_size - 1) * network.num_atoms, batch_size)
.long()
.unsqueeze(1)
.expand(batch_size, network.num_atoms)
)
if self.use_cuda:
offset = offset.cuda()
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * network.support
target_z = torch.clamp(target_z, min=network.v_min, max=network.v_max)
target_proj = self.dist_projection(network, next_z, target_z, offset)
log_dist = torch.log(z_dists)
element_wise_loss = -(target_proj * log_dist).sum(1)
return element_wise_loss
def dist_projection(
self,
network: nn.Module,
next_z: torch.Tensor,
target_z: torch.Tensor,
offset: torch.Tensor,
) -> torch.Tensor:
# project target_z onto the fixed support: b is the fractional atom index,
# lb/ub are the neighboring atoms that share its probability mass
b = (target_z - network.v_min) / network.delta_z
lb = b.floor().long()
ub = b.ceil().long()
proj_dist = torch.zeros(next_z.size())
if self.use_cuda:
proj_dist = proj_dist.cuda()
proj_dist.view(-1).index_add_(
0, (lb + offset).view(-1), (next_z * (ub.float() - b)).view(-1)
)
proj_dist.view(-1).index_add_(
0, (ub + offset).view(-1), (next_z * (b - lb.float())).view(-1)
)
return proj_dist
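if __name__ == "__main__":
    # Minimal smoke test, not part of rlcycle. It assumes the abstract Loss
    # base class simply stores `hyper_params` and `use_cuda` (as the usage of
    # self.hyper_params above implies); network/batch shapes are illustrative.
    from omegaconf import OmegaConf

    q_net = nn.Linear(4, 2)          # toy Q-network: 4 state dims, 2 actions
    target_net = nn.Linear(4, 2)
    hyper_params = OmegaConf.create({"gamma": 0.99, "n_step": 1})
    loss_fn = DQNLoss(hyper_params, use_cuda=False)

    batch = 8
    data = (
        torch.randn(batch, 4),              # states
        torch.randint(0, 2, (batch, 1)),    # actions
        torch.randn(batch, 1),              # rewards
        torch.randn(batch, 4),              # next_states
        torch.zeros(batch, 1),              # dones
    )
    element_wise = loss_fn((q_net, target_net), data)
    print(element_wise.mean())              # reduce before backpropagation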
| 34.485075
| 86
| 0.61069
| 614
| 4,621
| 4.315961
| 0.187296
| 0.026415
| 0.067925
| 0.043019
| 0.527547
| 0.496981
| 0.496981
| 0.468679
| 0.468679
| 0.468679
| 0
| 0.012854
| 0.259251
| 4,621
| 133
| 87
| 34.744361
| 0.76132
| 0.015797
| 0
| 0.438776
| 0
| 0
| 0.000883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.061224
| 0.010204
| 0.22449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e00f4579dad4a0f1f3310721291b602f532b6bf5
| 12,518
|
py
|
Python
|
scripts/gap_filling_viewer.py
|
raphischer/probgf
|
01bd2be85aa98afd79fc05c1eb3e260b2bcd2ebd
|
[
"MIT"
] | 3
|
2020-11-19T10:28:57.000Z
|
2021-04-15T17:16:24.000Z
|
scripts/gap_filling_viewer.py
|
raphischer/probgf
|
01bd2be85aa98afd79fc05c1eb3e260b2bcd2ebd
|
[
"MIT"
] | null | null | null |
scripts/gap_filling_viewer.py
|
raphischer/probgf
|
01bd2be85aa98afd79fc05c1eb3e260b2bcd2ebd
|
[
"MIT"
] | null | null | null |
"""viewer application which allows to interactively view spatio-temporal gap filling results"""
import os
import argparse
from datetime import datetime, timedelta
from tkinter import Canvas, Tk, Button, RAISED, DISABLED, SUNKEN, NORMAL
import numpy as np
from PIL import Image, ImageTk
import probgf.media as media
class MainWindow():
def next(self, event=None):
self.curr_img = (self.curr_img + 1) % len(self.imgs_orig)
self.refresh()
def prev(self, event=None):
self.curr_img = (self.curr_img - 1) % len(self.imgs_orig)
self.refresh()
def click_wheel(self, event):
self.start_drag = (event.x + self.shift_x, event.y + self.shift_y)
def click_left(self, event):
if not self.over_button:
self.prev()
def click_right(self, event):
if not self.over_button:
self.next()
def refresh(self):
zoom = float(self.zoom) / 100
self.start_x = int(self.img_w_f / 2 - self.img_w_f / zoom / 2) + self.shift_x
self.end_x = int(self.start_x + self.img_w_f / zoom)
self.start_y = int(self.img_w_f / 2 - self.img_w_f / zoom / 2) + self.shift_y
self.end_y = int(self.start_y + self.img_w_f / zoom)
if not self.mask_toggle:
self.b_masks.config(relief=RAISED)
img1 = self.imgs_orig[self.curr_img]
img2 = self.imgs_pred[self.curr_img]
else:
self.b_masks.config(relief=SUNKEN)
img1 = self.imgs_orig_m[self.curr_img]
img2 = self.imgs_pred_m[self.curr_img]
img1 = img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS)
img2 = img2.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS)
self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1)
self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2)
self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img])
self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img])
self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img])
self.canvas.itemconfig(self.day_info, text='{} - cloud cover {:06.2f}% - estimated MAE {}'.format(self.dates[self.curr_img],
self.cc[self.curr_img] * 100,
self.errors[self.curr_img]))
if self.zoom == 100:
self.canvas.itemconfig(self.zoom, text='')
self.b_reset.config(state=DISABLED)
else:
self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom))
self.b_reset.config(state=NORMAL)
def zoomer(self, event):
if event.num == 4 or event.delta == 120 or event.keysym == 'plus':
self.zoom += 20
elif event.delta == 240:
self.zoom += 40
elif event.delta == 360:
self.zoom += 60
else:
if self.zoom - 20 >= 100:
self.zoom -= 20
if self.zoom == 100:
self.reset_transform()
self.refresh()
def drag_roi(self, event):
self.shift_x = min(max(self.start_drag[0] - event.x, 0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)),
int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2))
self.shift_y = min(max(self.start_drag[1] - event.y, 0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)),
int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2))
self.refresh()
def toggle_mask(self, event=None):
self.mask_toggle = not self.mask_toggle
self.refresh()
def reset_transform(self, event=None):
self.mask_toggle = False
self.zoom = 100
self.shift_x = 0
self.shift_y = 0
self.refresh()
def button_enter(self, event):
self.over_button = True
def button_leave(self, event):
self.over_button = False
def __init__(self, root, w, h, imgs_p, imgs_o, imgs_m, dates, errors, logos):
self.dates = dates
self.errors = errors
# setup images
self.img_w = int(h * 0.68) # width of each displayed image
self.imgs_orig_m = [] # masked full images
self.imgs_pred_m = []
self.imgs_orig = [] # unmasked full images
self.imgs_pred = []
self.cc = []
for index, img in enumerate(imgs_p):
self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0))
self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0))
self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5))
self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5))
self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size)
self.curr_img = 0
# text labels and logos
h_logos = int(h / 17)
b_logos = int(w / 100)
self.canvas = Canvas(root, width=w, height=h)
self.canvas.pack()
self.canvas.configure(background='white')
self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] * logos[0].size[0]), h_logos), Image.ANTIALIAS))
self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] * logos[1].size[0]), h_logos), Image.ANTIALIAS))
self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] * logos[2].size[0]), h_logos), Image.ANTIALIAS))
self.canvas.create_image(int(self.logo1.width() / 2 + b_logos), int(self.logo1.height() / 2 + b_logos), image=self.logo1)
self.canvas.create_image(int(w - self.logo2.width() / 2 - b_logos), int(self.logo2.height() / 2 + b_logos), image=self.logo2)
self.canvas.create_image(int(w - self.logo3.width() / 2 - b_logos), int(h - (self.logo3.height() / 2 + b_logos)), image=self.logo3)
self.canvas.create_text(w / 2, h * 0.06, font=("Courier", int(h / 25)), text='Gap Filling Viewer')
self.canvas.create_text(w / 3.9, h * 0.19, font=("Courier", int(h / 35)), text='Observed')
self.canvas.create_text(w - w / 3.9, h * 0.19, font=("Courier", int(h / 35)), text='Predicted')
self.day_info = self.canvas.create_text(w / 2, h * 0.13, font=("Courier", int(h / 30)), text='')
self.zoom = self.canvas.create_text(w * 0.12, h * 0.94, font=("Courier", int(h / 50)), text='')
# image timeline
imagelist_h = int(self.img_w / len(self.imgs_pred)) + 1
imagelist_a = np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3), dtype='uint8')
for index in range(len(self.imgs_pred)):
imagelist_a[index, :, :, :] = np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS))
self.imagelists = []
for index in range(len(self.imgs_pred)):
c_list = np.array(imagelist_a)
c_list[index, :int(w / 600), :, :] = 255
c_list[index, (imagelist_h - int(w / 600)):, :, :] = 255
c_list[index, :, :int(w / 600), :] = 255
c_list[index, :, (imagelist_h - int(w / 600)):, :] = 255
self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h, 3))))
self.i_list = self.canvas.create_image(w * 0.5, h * 0.56, image=self.imagelists[self.curr_img])
# images and buttons
self.img_w_f = self.imgs_orig[0].size[0] # full image width
self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_orig] # images for visualization
self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_pred]
self.i_left = self.canvas.create_image(w / 3.9, h * 0.56, image=self.imgs_orig_v[self.curr_img])
self.i_right = self.canvas.create_image(w - w / 3.9, h * 0.56, image=self.imgs_pred_v[self.curr_img])
self.b_masks = Button(root, font=("Courier", int(h / 50)), text = "Show masks", command=self.toggle_mask)
self.b_reset = Button(root, font=("Courier", int(h / 50)), text = "Reset view", command=self.reset_transform, state=DISABLED)
self.b_quit = Button(root, font=("Courier", int(h / 50)), text = "Quit", command=self.canvas.master.destroy)
self.reset_transform()
self.canvas.create_window(w * 0.30, h * 0.94, window=self.b_masks)
self.canvas.create_window(w * 0.50, h * 0.94, window=self.b_reset)
self.canvas.create_window(w * 0.70, h * 0.94, window=self.b_quit)
# bind buttons and keys
root.bind("q", lambda e: self.canvas.master.destroy())
root.bind("r", self.reset_transform)
root.bind("m", self.toggle_mask)
root.bind("<Right>", self.next)
root.bind("<Left>", self.prev)
root.bind("<Down>", self.next)
root.bind("<Up>", self.prev)
root.bind("<Button-3>", self.click_right)
root.bind("<Button-1>", self.click_left)
root.bind("<Button-2>", self.click_wheel)
root.bind("<Button-4>", self.zoomer)
root.bind("<Button-5>", self.zoomer)
root.bind("<MouseWheel>", self.zoomer)
root.bind("<B2-Motion>", self.drag_roi)
root.bind("+", self.zoomer)
root.bind("-", self.zoomer)
self.over_button = False
self.b_masks.bind("<Enter>", self.button_enter)
self.b_masks.bind("<Leave>", self.button_leave)
self.b_reset.bind("<Enter>", self.button_enter)
self.b_reset.bind("<Leave>", self.button_leave)
self.b_quit.bind("<Enter>", self.button_enter)
self.b_quit.bind("<Leave>", self.button_leave)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-l', '--left', default='imgs/original/',
help='directory with images which are shown on the left')
parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/',
help='directory with images which are shown on the right')
parser.add_argument('-m', '--masks', default='imgs/mask/',
help='directory with mask images')
parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv',
help='report containing date and error information for the right hand images')
parser.add_argument('-y', '--year', type=int, default=2018,
help='year of data acquisition')
parser.add_argument('-W', '--width', type=int, default=1280,
help='window width')
parser.add_argument('-H', '--height', type=int, default=720,
help='window height')
args = parser.parse_args()
imgs_o = [Image.open(img) for img in sorted([os.path.join(args.left, img) for img in os.listdir(args.left)])]
imgs_p = [Image.open(img) for img in sorted([os.path.join(args.right, img) for img in os.listdir(args.right)])]
imgs_m = [Image.open(img) for img in sorted([os.path.join(args.masks, img) for img in os.listdir(args.masks)])]
report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1]
dates = [(datetime(args.year, 1, 1) + timedelta(int(report[day, 1]) - 1)).strftime('%b %d %Y') for day in range(report.shape[0])]
errors = ['{:4.1f}'.format(error) if error != 0.0 else 'n.a. ' for error in report[:, 5]]
logos = [media.logo1, media.logo2, media.logo3]
if len(imgs_o) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.left, args.report))
if len(imgs_p) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.right, args.report))
if len(imgs_m) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.masks, args.report))
root = Tk()
root.title('Gap Filling Viewer')
root.geometry("%dx%d+0+0" % (args.width, args.height))
MainWindow(root, args.width, args.height, imgs_p, imgs_o, imgs_m, dates, errors, logos)
root.focus_set()
root.mainloop()
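# Example invocation (hypothetical paths; each directory must contain one
# image per row of the report, in sorted filename order):
#   python scripts/gap_filling_viewer.py -l imgs/original/ -r imgs/pred/ \
#       -m imgs/mask/ -R report.csv -y 2018 -W 1280 -H 720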
| 51.941909
| 158
| 0.616552
| 1,872
| 12,518
| 3.969017
| 0.149038
| 0.035532
| 0.035532
| 0.01817
| 0.493809
| 0.406864
| 0.340242
| 0.273082
| 0.237012
| 0.182638
| 0
| 0.027346
| 0.228791
| 12,518
| 240
| 159
| 52.158333
| 0.742283
| 0.023406
| 0
| 0.105528
| 0
| 0
| 0.079115
| 0.005897
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065327
| false
| 0
| 0.035176
| 0
| 0.105528
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e00f57f732929e05a58cd0ef2eae47d08e8561a9
| 4,946
|
py
|
Python
|
paypal/pro/tests.py
|
pdfcrowd/django-paypal
|
0ea56dc6c799204f0f8719481f94d0c79de6eff5
|
[
"Unlicense",
"MIT"
] | 1
|
2019-06-13T15:59:48.000Z
|
2019-06-13T15:59:48.000Z
|
pro/tests.py
|
sirmmo/django-paypal
|
0c8aeec1c319a08ce1bfdf828534d01b69b8fa27
|
[
"MIT",
"Unlicense"
] | null | null | null |
pro/tests.py
|
sirmmo/django-paypal
|
0c8aeec1c319a08ce1bfdf828534d01b69b8fa27
|
[
"MIT",
"Unlicense"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.forms import ValidationError
from django.http import QueryDict
from django.test import TestCase
from django.test.client import Client
from paypal.pro.fields import CreditCardField
from paypal.pro.helpers import PayPalWPP, PayPalError
class RequestFactory(Client):
# Used to generate request objects.
def request(self, **request):
environ = {
'HTTP_COOKIE': self.cookies,
'PATH_INFO': '/',
'QUERY_STRING': '',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
'SERVER_PROTOCOL': 'HTTP/1.1',
}
environ.update(self.defaults)
environ.update(request)
return WSGIRequest(environ)
RF = RequestFactory()
REQUEST = RF.get("/pay/", REMOTE_ADDR="127.0.0.1:8000")
class DummyPayPalWPP(PayPalWPP):
pass
# """Dummy class for testing PayPalWPP."""
# responses = {
# # @@@ Need some real data here.
# "DoDirectPayment": """ack=Success&timestamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=""",
# }
#
# def _request(self, data):
# return self.responses["DoDirectPayment"]
class CreditCardFieldTest(TestCase):
def testCreditCardField(self):
field = CreditCardField()
field.clean('4797503429879309')
self.assertEquals(field.card_type, "Visa")
self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455')
class PayPalWPPTest(TestCase):
def setUp(self):
# Avoid blasting real requests at PayPal.
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.item = {
'amt': '9.95',
'inv': 'inv',
'custom': 'custom',
'next': 'http://www.example.com/next/',
'returnurl': 'http://www.example.com/pay/',
'cancelurl': 'http://www.example.com/cancel/'
}
self.wpp = DummyPayPalWPP(REQUEST)
def tearDown(self):
settings.DEBUG = self.old_debug
def test_doDirectPayment_missing_params(self):
data = {'firstname': 'Chewbacca'}
self.assertRaises(PayPalError, self.wpp.doDirectPayment, data)
def test_doDirectPayment_valid(self):
data = {
'firstname': 'Brave',
'lastname': 'Star',
'street': '1 Main St',
'city': u'San Jos\xe9',
'state': 'CA',
'countrycode': 'US',
'zip': '95131',
'expdate': '012019',
'cvv2': '037',
'acct': '4797503429879309',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199',}
data.update(self.item)
self.assertTrue(self.wpp.doDirectPayment(data))
def test_doDirectPayment_invalid(self):
data = {
'firstname': 'Epic',
'lastname': 'Fail',
'street': '100 Georgia St',
'city': 'Vancouver',
'state': 'BC',
'countrycode': 'CA',
'zip': 'V6V 1V1',
'expdate': '012019',
'cvv2': '999',
'acct': '1234567890',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199',}
data.update(self.item)
self.assertFalse(self.wpp.doDirectPayment(data))
def test_setExpressCheckout(self):
# We'll have to stub out tests for doExpressCheckoutPayment and friends
# because they're behind paypal's doors.
nvp_obj = self.wpp.setExpressCheckout(self.item)
self.assertTrue(nvp_obj.ack == "Success")
### DoExpressCheckoutPayment
# PayPal Request:
# {'amt': '10.00',
# 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'custom': u'website_id=480&cname=1',
# 'inv': u'website-480-cname',
# 'method': 'DoExpressCheckoutPayment',
# 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'payerid': u'BN5JZ2V7MLEV4',
# 'paymentaction': 'Sale',
# 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'token': u'EC-6HW17184NE0084127'}
#
# PayPal Response:
# {'ack': 'Success',
# 'amt': '10.00',
# 'build': '848077',
# 'correlationid': '375f4773c3d34',
# 'currencycode': 'USD',
# 'feeamt': '0.59',
# 'ordertime': '2009-03-04T20:56:08Z',
# 'paymentstatus': 'Completed',
# 'paymenttype': 'instant',
# 'pendingreason': 'None',
# 'reasoncode': 'None',
# 'taxamt': '0.00',
# 'timestamp': '2009-03-04T20:56:09Z',
# 'token': 'EC-6HW17184NE0084127',
# 'transactionid': '3TG42202A7335864V',
# 'transactiontype': 'expresscheckout',
# 'version': '54.0'}
| 32.973333
| 234
| 0.593207
| 507
| 4,946
| 5.731755
| 0.467456
| 0.018582
| 0.018582
| 0.01755
| 0.129387
| 0.129387
| 0.118032
| 0.084997
| 0.084997
| 0.084997
| 0
| 0.073118
| 0.247877
| 4,946
| 150
| 235
| 32.973333
| 0.708065
| 0.324909
| 0
| 0.116279
| 0
| 0
| 0.196841
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 1
| 0.093023
| false
| 0.011628
| 0.093023
| 0
| 0.244186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e00f75413d6a65ba71109974edd248bc1533ce8f
| 1,010
|
py
|
Python
|
Hackerrank_Bot_Saves_Princess.py
|
madhurgupta96/Algorithmic-Journey
|
75868af1050c99fc25e295812ba1a47468c6737f
|
[
"Apache-2.0"
] | null | null | null |
Hackerrank_Bot_Saves_Princess.py
|
madhurgupta96/Algorithmic-Journey
|
75868af1050c99fc25e295812ba1a47468c6737f
|
[
"Apache-2.0"
] | null | null | null |
Hackerrank_Bot_Saves_Princess.py
|
madhurgupta96/Algorithmic-Journey
|
75868af1050c99fc25e295812ba1a47468c6737f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 19:46:40 2020
@author: Intel
"""
def displayPathtoPrincess(n, grid):
    # The bot starts at the center of the odd-sized n x n grid.
    me_i = n // 2
    me_j = n // 2
    # Locate the princess 'p' in the grid.
    for i in range(n):
        if 'p' in grid[i]:
            pe_i = i
            for j in range(n):
                if 'p' == grid[i][j]:
                    pe_j = j
                    break
            break
    # Step toward the princess one move at a time, printing each move.
    while (me_i != pe_i) or (me_j != pe_j):
        if me_i - pe_i < 0:
            print('DOWN')
            me_i = me_i + 1
        elif me_i - pe_i > 0:
            print('UP')
            me_i = me_i - 1
        else:
            if me_j - pe_j > 0:
                print('LEFT')
                me_j = me_j - 1
            elif me_j - pe_j < 0:
                print('RIGHT')
                me_j = me_j + 1
            else:
                break
m = int(input())
grid = []
for i in range(0, m):
grid.append(input().strip())
displayPathtoPrincess(m,grid)
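# Example (3x3 grid, princess at the top-left corner):
#   input : 3, then rows "p--", "-m-", "---"
#   output: UP
#           LEFT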
| 22.954545
| 40
| 0.372277
| 135
| 1,010
| 2.607407
| 0.325926
| 0.068182
| 0.045455
| 0.051136
| 0.278409
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0.045276
| 0.49703
| 1,010
| 44
| 41
| 22.954545
| 0.647638
| 0.073267
| 0
| 0.15625
| 0
| 0
| 0.019187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0
| 0
| 0.03125
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e01063b5b93496a8c88b374770c28bc942feb23d
| 65,397
|
py
|
Python
|
gelviz/basic.py
|
HiDiHlabs/gelviz
|
515f0462738b44609679c2a26c7d8ac3ed3b4b2b
|
[
"BSD-3-Clause"
] | null | null | null |
gelviz/basic.py
|
HiDiHlabs/gelviz
|
515f0462738b44609679c2a26c7d8ac3ed3b4b2b
|
[
"BSD-3-Clause"
] | null | null | null |
gelviz/basic.py
|
HiDiHlabs/gelviz
|
515f0462738b44609679c2a26c7d8ac3ed3b4b2b
|
[
"BSD-3-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
import pybedtools
import pandas as pnd
import numpy as np
import tabix
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle
from matplotlib.patches import Arrow
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.cm as cm
import matplotlib
import tabix
import math
def plotGenes(genes_bed,
exons_bed,
introns_bed,
region_bed,
blacklist=None,
gene_map=None,
plot_gene_ids=True,
y_max=None,
distance_ratio=0.1,
ax=None,
plot_legend=False,
legend_loc="lower right",
color_plus="#80b1d3",
color_minus="#fb8072"):
"""Function for plotting gene structures, i.e. introns exons of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing TX start,
and TX end of genes.
:type genes_bed: :class:`pybedtools.BedTool`
:param exons_bed: :class:`pybedtools.BedTool` object containing exons of
genes.
:type exons_bed: :class:`pybedtools.BedTool`
:param introns_bed: :class:`pybedtools.BedTool` object containing introns
:type introns_bed: :class:`pybedtools.BedTool`
:param region_bed: :class:`pybedtools.BedTool` object containing the one
region, for which the gene plot is created.
:type region_bed: :class:`pybedtools.BedTool`
:param blacklist: List of gene names, for genes that should not be shown on
the plot, default is None
:type blacklist: list, optional
:param plot_gene_ids: If True, all gene ids will be included in the plot,
False otherwise, default is True
:type plot_gene_ids: bool, optional
:param y_max: Max y value in the gene plot. If not set, then y_max is the
max number of stacked genes, default is None.
:type y_max: bool, optional
:param distance_ratio: Minimal distance between two genes, as a ratio of ax
width, required for two genes to be plotted side by side. If the distance
falls below this ratio, the genes are stacked, default is 0.1.
:type distance_ratio: float, optional
:param ax: Axes instance on which the genes are plotted, default is None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True, a legend describing plus or minus stranded
genes is plotted, False otherwise. Default is False.
:type plot_legend: bool, optional
:param legend_loc: Location of the legend. Either of "lower left",
"lower right", "upper left", "upper right", default is "lower right".
:type legend_loc: str, optional
:param color_plus: Color code for plus stranded genes, default is "#80b1d3".
:type color_plus: str, optional.
:param color_minus: Color code for minus stranded genes, default is
"#fb8072".
:type color_minus: str, optional.
:return: Tuple of max_y_pos+1.5, patch_list, patch_description_list, where
1. max_y_pos+1.5 is the max_y_position + 1.5. max_y_pos defines the \
number of stacked genes.
2. patch_list is the list of patches drawn on the ax.
3. patch_description_list is the list of descriptions for the patches \
drawn on the ax.
:rtype: list
"""
ax = ax if ax is not None else plt.gca()
genes_in_region = genes_bed
exons_in_region = exons_bed
introns_in_region = introns_bed
region_border_up = int(region_bed[0][1])
region_border_down = int(region_bed[0][2])
region_size = region_border_down-region_border_up
color_forward = color_plus
color_reverse = color_minus
max_y_pos = None
if(not len(genes_in_region) == 0):
# Determine y positions of genes for plotting
max_y_pos, y_pos_dict = determineYPosGene(genes_in_region,
(region_border_down-
region_border_up),
distance_ratio)
if(not y_max is None):
max_y_pos = y_max
# Plot Exons
for i in exons_in_region:
start = int(i[1])
end = int(i[2])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
y = max_y_pos-y_pos_dict[gene_name]+0.5
rect = Rectangle((start, y-.2),
end-start,
.4,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
patch_list = []
patch_description_list = []
met_forward = False
met_reverse = False
# Plot Introns
for i in introns_in_region:
start = int(i[1])
end = int(i[2])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
y = max_y_pos-y_pos_dict[gene_name]+0.5
patch = Rectangle((start, y-.03),
end-start,
.06,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(patch)
if(strand == "+" and not(met_forward)):
patch_list += [patch]
patch_description_list += ["forward strand"]
met_forward = True
elif(strand == "-" and not(met_reverse)):
patch_list += [patch]
patch_description_list += ["reverse strand"]
met_reverse = True
# Plot Gene Names
if(plot_gene_ids):
for i in genes_in_region:
start = int(i[1])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
border_distance_down = region_border_down-start
if(start < region_border_up):
start = region_border_up
border_distance_down = region_border_down-start
if(not(float(border_distance_down)/float(region_size)
< distance_ratio)):
gene_name = str(i[3])
gene_name_label = gene_name
if(not gene_map is None):
gene_name_label = gene_map[gene_name]
y = max_y_pos-y_pos_dict[gene_name]+.8
plt.text(start,
y,
gene_name_label,
size=5,
color = color)
plt.xlim([region_border_up, region_border_down])
plt.ylim([0, max_y_pos+1.5])
plt.yticks([], [])
if(plot_legend):
plt.legend(patch_list,
patch_description_list,
loc=legend_loc,
fontsize=5)
return max_y_pos+1.5, patch_list, patch_description_list
def determineYPosGene(genes_bed,
region_size,
distance_ratio):
'''Function that determines the max y position for gene plotting via
function plotGenes.
:param genes_bed: :class:`pybedtools.BedTool` object containing genes to be
plotted.
:type genes_bed: :class:`pybedtools.BedTool`
:param region_size: Size of region to be plotted in base pairs.
:type region_size: int
:param distance_ratio: Minimal distance between two genes, as a ratio of ax
width, required for two genes to be plotted side by side. If the distance
falls below this ratio, the genes are stacked.
:type distance_ratio: float
:return: Tuple of
1. max_y_pos: Defines the number of stacked genes.
2. y_pos_dict: Dictionary with keys = gene ids and values = y position \
of gene.
:rtype: tuple
'''
sort_indices = [int(idx) for idx in np.argsort([i[1] for i in genes_bed])]
genes_sorted_bed = [genes_bed[i] for i in sort_indices]
y_pos_dict = {}
y_level_dict = {}
max_y_pos = 0
for interval in genes_sorted_bed:
gene_name = interval[3]
gene_start = int(interval[1])
gene_end = int(interval[2])
for i in range(max_y_pos+1):
if(i == 0 and not max_y_pos in y_level_dict):
y_pos_dict[gene_name] = i
y_level_dict[i] = [[gene_start, gene_end]]
break
elif(gene_start > y_level_dict[i][-1][1] and
float(gene_start-y_level_dict[i][-1][0])/float(region_size) >
distance_ratio):
y_pos_dict[gene_name] = i
y_level_dict[i] += [[gene_start, gene_end]]
break
elif(i == max_y_pos):
max_y_pos += 1
y_pos_dict[gene_name] = max_y_pos
y_level_dict[max_y_pos] = [[gene_start, gene_end]]
break
else:
continue
return max_y_pos, y_pos_dict
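# Example (hypothetical coordinates): for two overlapping genes A=[100, 500]
# and B=[300, 700] in a region of size 1000 with distance_ratio=0.1, B cannot
# be placed next to A, so it is stacked: the function returns
# (1, {'A': 0, 'B': 1}).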
def createGeneNameMap(gene_name_mapping_filename):
'''Function that creates a mapping between ENSEMBL gene ids and HUGO gene
symbols.
:param gene_name_mapping_filename: Path to a tab separated file, for which the
first column is an ENSEMBL gene id, and the second column is the HUGO
gene symbol
:type gene_name_mapping_filename: str
:return: Dictionary containing the gene id mapping.
:rtype: dictionary
'''
gene_name_mapping_file = open(gene_name_mapping_filename, "r")
gene_map = {}
for line in gene_name_mapping_file:
split_line = line.rstrip().split("\t")
ensembl_gene_id = split_line[0].split(".")[0]
hugo_gene_symbol = split_line[1].split(".")[0]
gene_map[ensembl_gene_id] = hugo_gene_symbol
gene_name_mapping_file.close()
return gene_map
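# Example: a mapping file line "ENSG00000141510.16\tTP53" yields
# gene_map["ENSG00000141510"] == "TP53" (version suffixes are stripped).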
def plotGeneExpression(genes_bed,
region_bed,
expression_df_g1,
expression_df_g2,
gene_names_map,
blacklist=None,
ax=None,
plot_legend=False,
color_g1="#fb8072",
color_g2="#80b1d3",
g1_id="tumor",
g2_id="normal",
plot_gene_names=True):
'''Function for plotting paired gene expression (e.g. tumor and normal) on a
gene region scale retaining the position of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing TXstart,
and TXend of genes.
:type genes_bed: :class:`pybedtools.BedTool`
:param region_bed: :class:`pybedtools.BedTool` object containing the region
to be plotted
:type region_bed: :class:`pybedtools.BedTool`
:param expression_df_g1: :class:`pandas.Dataframe` containing the expression
values of g1 samples (columns: sample ids; index: gene ids)
:type expression_df_g1: :class:`pandas.DataFrame`
:param expression_df_g2: :class:`pandas.Dataframe` containing the expression
values of g2 samples (columns: sample ids; index: gene ids)
:type expression_df_g2: :class:`pandas.DataFrame`
:param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and values:
HUGO GENE SYMBOLs.
:type gene_names_map: dict.
:param blacklist: Set containing gene ids not to be plotted, default to
None.
:type blacklist: set, optional
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True legend is plotted, False otherwise, defaults to
False.
:type plot_legend: bool
:param color_g1: Color used for plotting g1 samples expression, defaults to
"#fb8072".
:type color_g1: str, optional
:param color_g2: Color used for plotting g2 samples expression, defaults to
"#80b1d3".
:type color_g2: str, optional
:param g1_id: ID of g1 used for legend plotting, defaults to "tumor".
:type g1_id: str, optional
:param g2_id: ID of g2 used for legend plotting, defaults to "normal".
:type g2_id: str, optional
:param plot_gene_names: If True, the HUGO GENE SYMBOLs will be shown, else
the GENE SYMBOLs are hidden.
:type plot_gene_names: bool.
:return: Axis on which plot was placed.
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot`
'''
ax = ax if ax is not None else plt.gca()
# Get gene names and regions
genes_in_region_bed = genes_bed.intersect(region_bed,
wa=True,
u=True).sort()
gene_names = []
gene_regions = []
for e in genes_in_region_bed:
gene_name_ens = str(e[3])
gene_names += [gene_names_map[gene_name_ens]]
gene_regions += [[int(e[1]), int(e[2])]]
region_right_border = int(region_bed[0][2])
region_left_border = int(region_bed[0][1])
# Determine minimal extension of barplot
extension=None
for i in range(len(gene_regions)):
if(not blacklist is None and gene_names[i] in blacklist):
continue
left_border = gene_regions[i][0]
right_border = None
if(i < len(gene_names)-1):
right_border = gene_regions[i+1][0]
else:
right_border = region_right_border
current_extension = right_border-left_border
if(current_extension == 0.):
continue
if(extension is None):
extension = float(current_extension)
elif(current_extension < extension):
extension = float(current_extension)
boxprops = {"color": "k", "linewidth": .3}
flierprops = {"color": "k"}
medianprops = {"color": "k", "linewidth": .3}
whiskerprops = {"color": "k", "linewidth": .3}
capprops={"color": "k", "linewidth": .3}
patch_list = None
patch_description_list = None
tick_positions = []
gene_names_clean = []
counter=0
patch_saved = False
for gene_name in gene_names:
left_border = gene_regions[counter][0]
right_border = region_right_border
if(not blacklist is None and gene_name in blacklist):
counter += 1
continue
if(counter < len(gene_names)-1):
right_border = gene_regions[counter+1][0]
bplot_g1_pos = left_border + extension/4.
bplot_g2_pos = left_border + 3*(extension/4.)
tick_positions += [left_border + extension/2.]
gene_names_clean += [gene_name]
exp_values_g1 = expression_df_g1.loc[gene_name, :]
if(type(exp_values_g1).__name__ == "Series"):
exp_values_g1 = list(exp_values_g1)
else:
exp_values_g1 = list(exp_values_g1.iloc[0, :])
exp_values_g2 = expression_df_g2.loc[gene_name, :]
if(type(exp_values_g2).__name__ == "Series"):
exp_values_g2 = list(exp_values_g2)
else:
exp_values_g2 = list(exp_values_g2.iloc[0, :])
bplot_g1 = ax.boxplot([np.log2([i if
i >= 1. else
1. for
i in exp_values_g1])],
positions=[bplot_g1_pos],
widths=extension/2.,
patch_artist=True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
bplot_g2 = ax.boxplot([np.log2([i if
i >= 1. else
1. for
i in exp_values_g2])],
positions=[bplot_g2_pos],
widths=extension/2.,
patch_artist = True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
bplot_g1["boxes"][0].set_facecolor(color_g1)
bplot_g2["boxes"][0].set_facecolor(color_g2)
if(not patch_saved):
patch_saved=True
patch_list = [bplot_g1["boxes"][0], bplot_g2["boxes"][0]]
patch_description_list = [g1_id, g2_id]
counter += 1
ax.set_xlim(region_left_border, region_right_border)
ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions)))
ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean)))
if(not plot_gene_names):
ax.xaxis.set_major_formatter(
ticker.FixedFormatter(([ " " for i in
gene_names_clean])))
for tick in ax.get_xticklabels():
tick.set_rotation(45)
tick.set_size(6)
for ytick in ax.get_yticklabels():
ytick.set_size(6)
if(plot_legend):
ax.legend(patch_list,
patch_description_list,
fontsize=5,
loc='lower left')
return ax
def plotGeneExpressionEqualDist(genes_bed,
gene_mid_points,
region,
expression_df,
groups,
gene_names_map=None,
blacklist=None,
ax=None,
plot_legend=False,
colors=None,
ids=None,
plot_gene_names=True,
position_gene_names="bottom",
log_transformed=True,
plot_points=False,
alpha=.5):
'''Function for plotting grouped gene expression (e.g. tumor and normal) on
a gene region scale equalizing the position of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing gene
regions.
:type genes_bed: :class:`pybedtools.BedTool`
:param gene_mid_points: list of integer values containing center positions
of genes.
:type gene_mid_points: list
:param region: List containing the region to be plotted
([<chrom>, <start>, <end>]).
:type region: list
:param groups: List of lists containing the IDs of the different groups.
:type groups: list
:param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and values:
HUGO GENE SYMBOLs.
:type gene_names_map: dict.
:param expression_df: class:`pandas.DataFrame` object containing the
expression values of all samples (columns: sample ids; index: gene ids).
:type expression_df: class:`pandas.DataFrame`
:param blacklist: Set containing gene ids not to be plotted, defaults to
None,
:type blacklist: set, optional
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True plot legend, False otherwise, defaults to False.
:type plot_legend: bool, optional
:param colors: List of colors used for plotting samples expression. The
number of colors must be the same as the number of groups, defaults to
None.
:type colors: str, optional
:param ids: IDs used for legend plotting, defaults to None. Number of ids
must be the same as the number of groups.
:type ids: list, optional.
:param plot_gene_names: True if gene names shall be plotted,
False otherwise, defaults to True.
:type plot_gene_names: bool, optional
:param position_gene_names: Either of "top", or "bottom", defaults to
"bottom".
:type position_gene_names: str, optional
:param log_transformed: If True use log transformed values for plotting,
non-transformed values otherwise.
:type log_transformed: bool, optional
:param plot_points: If True, a point per expression value is plotted in
addition to the boxplot, no points are plotted otherwise, defaults to
False.
:type plot_points: bool, optional
:param alpha: Alpha value for the background color of the boxplots boxes,
defaults to 0.5.
:type alpha: float, optional
:return: Plots axis.
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot`
'''
standard_colors = ["#66c2a5",
"#fc8d62",
"#8da0cb",
"#ec87c2",
"#a6d854",
"#ffd92f",
"#e5c494",
"#bbbbbb"]
ax = ax if ax is not None else plt.gca()
region_bed = pybedtools.BedTool("\t".join([str(i) for i in region]),
from_string=True)
# Get gene names and regions
genes_in_region_bed = genes_bed.intersect(region_bed,
wa=True,
u=True).sort()
gene_names = []
gene_regions = []
for e in genes_in_region_bed:
gene_name_ens = str(e[3])
if(not gene_names_map is None):
gene_names += [gene_names_map[gene_name_ens]]
else:
gene_names += [gene_name_ens]
gene_regions += [[int(e[1]), int(e[2])]]
region_right_border = int(region_bed[0][2])
region_left_border = int(region_bed[0][1])
# Determine minimal extension of barplot
extension=None
if(len(gene_mid_points) <= 1):
extension=region[2]-region[1]
else:
extension=gene_mid_points[1]-gene_mid_points[0]
# Subtract a small percentage of region size from extension
extension=extension-(region[2]-region[1])*.01
boxprops = {"color": "k", "linewidth": .3, "alpha":alpha}
flierprops = {"color": "k"}
medianprops = {"color": "k", "linewidth": .3}
whiskerprops = {"color": "k", "linewidth": .3}
capprops={"color": "k", "linewidth": .3}
patch_list = []
patch_description_list = []
tick_positions = []
gene_names_clean = []
counter=0
for gene_name in gene_names:
left_border = gene_mid_points[counter]-extension/2
right_border = gene_mid_points[counter]+extension/2
if(not blacklist is None and gene_name in blacklist):
counter += 1
continue
n_groups = len(groups)
for g in range(n_groups):
bplot_pos = left_border + (2*g+1)*extension/float((n_groups*2.))
tick_positions += [left_border + extension/2.]
gene_names_clean += [gene_name]
exp_values = expression_df.loc[gene_name, groups[g]]
if(type(exp_values).__name__ == "Series"):
exp_values = list(exp_values)
else:
exp_values = list(exp_values.iloc[0, :])
expression_values = exp_values
if(log_transformed):
expression_values = np.log2([i
if i >= 1.
else 1.
for i in exp_values])
bplot = ax.boxplot(expression_values,
positions=[bplot_pos],
widths=extension/float(n_groups),
patch_artist=True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
color = None
if(not colors is None):
color = colors[g]
else:
color = standard_colors[g]
bplot["boxes"][0].set_facecolor(color)
if(plot_points):
x_positions = [ (bplot_pos+
(i-.5)*
((2*extension)/(float(n_groups)*3))) for i in
list(np.random.rand(len(expression_values))) ]
plt.plot(x_positions, expression_values, "k.", markersize=3)
g_id = None
if(not ids is None):
g_id = ids[g]
else:
g_id = "group "+str(g)
if(not g_id in patch_description_list):
patch_list += [bplot["boxes"][0]]
patch_description_list += [g_id]
counter += 1
ax.set_xlim(region_left_border, region_right_border)
if(position_gene_names == "top"):
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions)))
ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean)))
if(not plot_gene_names):
ax.xaxis.set_major_formatter(ticker.FixedFormatter(
([ " " for i in
gene_names_clean])))
for tick in ax.get_xticklabels():
tick.set_rotation(45)
tick.set_size(5)
for ytick in ax.get_yticklabels():
ytick.set_size(5)
if(plot_legend):
ax.legend(patch_list,
patch_description_list,
fontsize=5,
loc='lower left')
return ax
def plotGenomicSegments(segments_list,
chrom,
start,
end,
ax = None):
'''Function for plotting genomic segments in different colors.
:param segments_list: Iterator of list-like elements containing
(chrom, start, end, name, score, strand, start, end, color). The color
field is used to determine the color for plotting (R,G,B).
:type segments_list: iterator
:param chrom: Chromosome of the region to be plotted.
:type chrom: str
:param start: Start position of the region to be plotted.
:type start: str
:param end: End position of the region to be plotted.
:type end: str
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Dictionary with keys = names of segments, and values patch
:rtype: dict
'''
ax = ax if ax is not None else plt.gca()
patches_dict = {}
for segment in segments_list:
segment_start = int(segment[1])
segment_end = int(segment[2])
color = tuple([ float(i)/256. for i in
str(segment[-1]).split(",") ]+[1])
segment_type = str(segment[3])
if(segment_type == "R"):
color = (1,1,1,1)
rect = Rectangle((segment_start, 0),
segment_end-segment_start,
1,
color=color)
ax.add_patch(rect)
patches_dict[segment_type] = rect
plt.xlim(int(start), int(end))
plt.ylim(0, 1)
plt.yticks([], [])
return patches_dict
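# Example: a segment whose color field is "252,13,13" is drawn with RGBA
# (252/256., 13/256., 13/256., 1); segments of type "R" are forced to white.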
def plotCNVs(cnvs_bed,
chromosome,
start,
end,
ploidy=2,
cnv_threshold=0.7,
color_gain="g",
color_loss="r",
color_neutral="k",
ax=None):
'''Function for plotting CNV segments
:param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with
following entries:
1. Chromosome,
2. Start Position,
3. End Position,
4. Deviation from ploidy,
5. True Copy Number)
:type cnvs_bed: :class:`pybedtools.BedTool`
:param chromosome: Chromosome for which to plot CNVs.
:type chromosome: str
:param start: Start position on chromosome.
:type start: int
:param end: End position on chromosome.
:type end: int
:param ploidy: Assumed ploidy of tumor, defaults to 2.
:type ploidy: int, optional
:param cnv_threshold: Minimal deviation from ploidy to be considered as a
CNV, defaults to 0.7.
:type cnv_threshold: float, optional
:param color_gain: Plot color of copy number gains, defaults to "g".
:type color_gain: str, optional
:param color_loss: Plot color of copy number losses, defaults to "r".
:type color_loss: str, optional
:param color_neutral: Plot color of copy number neutral regions, defaults to
"k".
:type color_neutral: str, optional
:param ax: Axis used for plotting.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
# Use given axis for plotting
ax = ax if ax is not None else plt.gca()
for interval in cnvs_bed:
current_start = int(interval[1])
current_end = int(interval[2])
ploidy_dev = float(interval[3])
tcn = float(interval[4])
# Smooth tcn, if ploidy_dev is smaller than cnv_threshold
if(abs(ploidy_dev) < cnv_threshold):
tcn = ploidy
color = color_neutral
if(ploidy_dev >= cnv_threshold):
color=color_gain
elif(ploidy_dev <= -1.*cnv_threshold):
color = color_loss
if(abs(ploidy_dev) > cnv_threshold):
rect = Rectangle((current_start, tcn-.2),
current_end-current_start,
.4,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
else:
rect = Rectangle((current_start, tcn-.1),
current_end-current_start,
.2,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
# Plot dashed guide lines at integer copy numbers
color_threshold = (189./255., 189./255., 189./255., 0.5)
n_guide_lines = 4 if ploidy == 2 else (6 if ploidy == 4 else 0)
for level in range(1, n_guide_lines + 1):
    plt.plot([int(start), int(end)],
             [level, level],
             color=color_threshold,
             linestyle="--",
             linewidth=.5)
plt.xlim([int(start), int(end)])
if(ploidy == 2):
plt.ylim([0, 4.5])
plt.yticks([0, 1, 2, 3, 4], ["0", "1", "2", "3", "4"], size=6)
elif(ploidy == 4):
plt.ylim([0, 6.5])
plt.yticks([0, 2, 4, 6], ["0", "2", "4", "6"], size=6)
plt.xticks(rotation=45)
def plotCNVsHeat(cnvs_bed,
chromosome,
start,
end,
ploidy=2,
cnv_threshold=0.7,
cmap="bwr",
max_dev=None,
ax=None):
'''Function for plotting CNV segments as heatmap
:param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with
following entries:
1. Chromosome,
2. Start Position,
3. End Position,
4. Deviation from ploidy,
5. True Copy Number)
:type cnvs_bed: :class:`pybedtools.BedTool`
:param chromosome: Chromosome for which to plot CNVs.
:type chromosome: str
:param start: Start position on chromosome.
:type start: int
:param end: End position on chromosome.
:type end: int
:param ploidy: Assumed ploidy of tumor, defaults to 2.
:type ploidy: int, optional
:param cnv_threshold: Minimal deviation from ploidy to be considered as a
CNV, defaults to 0.7.
:type cnv_threshold: float, optional
:param cmap: Colormap used for plotting CNVs, defaults to "bwr".
:type cmap: str, optional
:param max_dev: Maximal deviation from ploidy to plot, defaults to None.
:type max_dev: float, optional
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
# Use given axis for plotting
ax = ax if ax is not None else plt.gca()
colors = plt.cm.get_cmap(cmap)
if(max_dev is None):
max_dev = max([abs(float(i[3])) for i in cnvs_bed])
for interval in cnvs_bed:
current_start = int(interval[1])
current_end = int(interval[2])
ploidy_dev = float(interval[3])
tcn = float(interval[4])
# clamp the ploidy deviation (used for the color lookup) to +/- max_dev
if(ploidy_dev < -1.*max_dev):
    ploidy_dev = -1.*max_dev
elif(ploidy_dev > max_dev):
    ploidy_dev = max_dev
color = colors((ploidy_dev+max_dev)/(2*max_dev))
if(abs(ploidy_dev) < cnv_threshold):
color=colors(.5)
rect = Rectangle((current_start, .5),
current_end-current_start,
1,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
plt.xlim([int(start), int(end)])
plt.ylim([.5, 1.5])
plt.xticks([], [])
plt.yticks([], [])
def readACESeqAsBed(input_filename):
'''Function that reads CNVs from ACESeq ("*most_important*") files and
converts them to pybedtools.BedTool object
:param input_filename: Full path to ACESeq "most_important" file
:type input_filename: str
:return: :class:`pybedtools.BedTool` object containing CNVs from ACESeq
:rtype: :class:`pybedtools.BedTool`
'''
input_file = open(input_filename, "r")
cnv_bed_list = []
ploidy = None
for line in input_file:
if(line[:7] == "#ploidy"):
ploidy = float(line.rstrip().split(":")[1])
print(ploidy)
if(line[0] == "#" or line[:5] == "chrom"):
continue
split_line = line.rstrip().split("\t")
ploidy_dev = float(split_line[5])-ploidy
chrom = split_line[0]
if(chrom == "23"):
chrom="X"
elif(chrom == "24"):
chrom = "Y"
cnv_bed_list += [ [chrom,
split_line[1],
split_line[2],
str(ploidy_dev),
split_line[5],
"+"]
]
input_file.close()
return pybedtools.BedTool("\n".join(["\t".join(e) for e in
cnv_bed_list]),
from_string=True)
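# Example (schematic; real ACEseq "most_important" files carry more columns):
# given a header line "#ploidy:2.0" and a data line whose first three fields
# are chrom/start/end and whose sixth field is the total copy number, e.g.
#   7   1000   5000   .   .   4.0
# the resulting BED interval is ['7', '1000', '5000', '2.0', '4.0', '+'],
# where the 4th field is the deviation from ploidy (4.0 - 2.0).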
def plotChIPSignals(chip_signals,
r_chrom,
r_start,
r_end,
ax=None,
color="b",
offset=None,
merge=None):
'''Function that plots bedGraph-like iterators.
:param chip_signals: Iterator for which each element is a list-like
object containing:
1. Chromosome
2. Start postion
3. End position
4. Value to be plotted as bar
:type chip_signals: iterator
:param r_chrom: Chromosome of region to be plotted.
:type r_chrom: str
:param r_start: Start position of region to be plotted.
:type r_start: int
:param r_end: End position of region to be plotted.
:type r_end: int
:param ax: Axis of plot
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param color: color of bars, defaults to "b".
:type color: str, optional
:param offset: Length of intervals, defaults to None.
:type offset: int, optional
:param merge: Number of consecutive elements to be merged. If this value is
not None, every `merge` elements are averaged and plotted as one bar,
defaults to None.
:type merge: int, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
max_signal = 0
left = []
height = []
for signal in chip_signals:
start = int(signal[1])
end = int(signal[2])
value = float(signal[3])
if(value > max_signal):
max_signal = value
if(not offset is None):
end = start + offset
left += [start]
height += [value]
left_merged = []
height_merged = []
if(not merge is None):
heights = []
lefts = []
for i in range(len(left)):
if(i % merge == 0 and not (i == 0)):
left_merged += [lefts[0]]
lefts = []
height_merged += [np.mean(heights)]
heights = []
heights += [height[i]]
lefts += [left[i]]
if(not i % merge == 0):
left_merged += [lefts[0]]
lefts = []
height_merged += [np.mean(heights)]
heights = []
offset = merge*offset
left = left_merged
height = height_merged
plt.bar(left, height, offset, color = color, edgecolor = color)
plt.xlim(r_start, r_end)
def plotMethylationProfileHeat(methylation_bed,
chrom,
start,
end,
bin_size=1000,
ax = None):
'''Function for plotting methylation values as heatmap
:param methylation_bed: Methylation calls. Following fields must be
included: Chrom, Start, End, Methylated Cs, Unmethylated Cs.
:type methylation_bed: :class:`pybedtools.BedTool`
:param chrom: Chromosome of region to be plotted.
:type chrom: str
:param start: Start position of region to be plotted.
:type start: int
:param end: End position of region to be plotted.
:type end: int
:param bin_size: size of bin to average methylation values, defaults to
1000.
:type bin_size: int, optional
:param ax: Axis to be used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
binned_meth_calls = [ [0, 0] for i in range(int(((end-start)/bin_size)+1)) ]
counter = 0
for element in methylation_bed:
# Determine bin
position = int(element[1])
if(position < start or position > end):
continue
n_meth = int(element[3])
n_unmeth = int(element[4])
current_bin = int((position-start)/bin_size)
counter += 1
binned_meth_calls[current_bin][0] += n_meth
binned_meth_calls[current_bin][1] += n_unmeth
binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1]))
if (float(i[0])+float(i[1])) > 0
else "NA"
for i in binned_meth_calls ]
binned_average_meth_no_missing = []
n = len(binned_average_meth)
for i in range(n):
if(not binned_average_meth[i] == "NA"):
binned_average_meth_no_missing += [binned_average_meth[i]]
else:
meth_before = (binned_average_meth[i-1]
if not i == 0
else "NA")
meth_after = (binned_average_meth[i+1]
if not i == len(binned_average_meth)-1
else "NA")
average_list = [ j
for j
in [meth_before, meth_after]
if not j == "NA" ]
binned_average_meth_no_missing += [ (float(sum(average_list))/
float(len(average_list)))
if len(average_list) > 0
else 0. ]
binned_average_meth = binned_average_meth_no_missing
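# Example: binned values [0.2, "NA", 0.8] become [0.2, 0.5, 0.8], the
# missing bin being imputed with the mean of its direct neighbors.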
# Plot average methylation values per bin
# Define Colormap
cmap = cm.bwr
norm = matplotlib.colors.Normalize(vmin=0., vmax=1.)
m = matplotlib.cm.ScalarMappable(norm = norm, cmap = cmap)
for cbin in range(len(binned_average_meth)):
rect = Rectangle((start+cbin*bin_size, 0),
bin_size,
1,
color=m.to_rgba(binned_average_meth[cbin]))
ax.add_patch(rect)
plt.xlim([start, end])
plt.ylim([0, 1])
plt.xticks([], [])
plt.yticks([], [])
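# Usage sketch (hypothetical file name; assumes pybedtools is importable and
# the bed file carries Chrom, Start, End, Methylated Cs, Unmethylated Cs):
#
#     meth = pybedtools.BedTool("meth_calls.bed")
#     plotMethylationProfileHeat(meth, "chr1", 1000000, 1100000, bin_size=2000)
#     plt.show()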
def plotMethylationProfile(meth_calls,
chrom,
start,
end,
color="k",
ax=None):
'''Function that plots methylation values as dot plots.
:param meth_calls: Iterator containing list-like elements with the following
entries:
1. Chromosome
2. Start position
3. End position
4. Number methylated cytosines
5. Number unmethylated cytosines
Or
1. Chromosome
2. Start position
3. End position
4. Beta Value
:type meth_calls: iterator
:param chrom: Chromosome of region to be plotted.
:type chrom: str
:param start: Start position of region to be plotted.
:type start: int
:param end: End position of region to be plotted.
:type end: int
:param color: Color of points representing methylation values, defaults to
"k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
n_entries = len(meth_calls[0])
if(n_entries == 5):
plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ],
[ float(m[3])/(float(m[3])+float(m[4]))
if not(float(m[3])+float(m[4]) == 0.)
else 0. for m in meth_calls],
color=color,
marker=".",
linestyle='None',
markersize=1,
alpha=.5)
elif(n_entries == 4):
plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ],
[ float(m[3]) for m in meth_calls ],
color=color,
marker=".",
linestyle='None',
markersize=1,
alpha=.5)
plt.ylim([0, 1])
plt.xticks([], [])
plt.xlim([start, end])
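# Usage sketch (hypothetical in-memory calls in the five-field format):
#
#     calls = [("chr1", 100, 101, 8, 2), ("chr1", 200, 201, 1, 9)]
#     plotMethylationProfile(calls, "chr1", 0, 1000)  # dots at beta 0.8, 0.1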
def plotTX(chrom_r,
start_r,
end_r,
TX_pos,
direction="right",
color="k",
ax=None):
'''Function that plots a translocation event as a bar, showing the part
of the genome that is translocated.
:param chrom_r: Chromosome of the region to be plotted.
:type chrom_r: str
:param start_r: Start position of the region to be plotted.
:type start_r: int
:param end_r: End position of the region to be plotted.
:type end_r: int
:param TX_pos: Position of the translocation.
:type TX_pos: int
:param direction: Direction of the genomic part that is translocated. Either
of "left" (upstream), or "right" (downstream), defaults to "right".
:type direction: str, optional
:param color: Color of the bar representing the translocation, defaults to
"k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
TX_start = TX_pos
TX_end = end_r
if(direction == "left"):
TX_start = start_r
TX_end = TX_pos
rect = Rectangle((TX_start, .4),
TX_end-TX_start,
.2,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
plt.xlim([start_r, end_r])
plt.ylim([0.3, 0.7])
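# Usage sketch (hypothetical coordinates): mark everything downstream of a
# breakpoint at position 1,500,000.
#
#     plotTX("chr9", 1000000, 2000000, 1500000, direction="right")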
def plotRegions(regions,
start,
end,
color="#cbebc4",
edgecolor=False,
alpha=1,
ax = None):
'''Function that plots genomic regions as simple rectangles.
:param regions: Iterator containing list-like elements with the following
entries:
1. Chromosome
2. Start position
3. End position
:type regions: iterator
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param color: Color of the rectangles representing the regions to be
plotted, defaults to "#cbebc4".
:type color: str, optional
:param edgecolor: Color of region edge. If False, no edge is plotted,
defaults to False.
:type edgecolor: str, optional
:param alpha: Alpha value of the rectangle, representing the region to be
plotted, defaults to 1.
:type alpha: float, optional.
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
for region in regions:
rect = Rectangle([int(region[1]), -.75],
int(region[2])-int(region[1]),
1.5,
facecolor=color,
edgecolor=edgecolor if edgecolor else 'none',
alpha=alpha)
ax.add_patch(rect)
plt.xticks([], [])
plt.yticks([], [])
plt.xlim([start, end])
plt.ylim([-1, 1])
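# Usage sketch (hypothetical regions; any iterable of bed-like rows works):
#
#     regions = [("chr1", 1000, 2000), ("chr1", 5000, 8000)]
#     plotRegions(regions, start=0, end=10000, edgecolor="k")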
def plotMotifDirections(motifs_bed,
start,
end,
head_width=0.2,
head_length=1000,
overhang=0,
color_plus="#80b1d3",
color_minus="#fb8072",
ax=None):
'''Function that plots TF motifs as arrows, indicating their directionality.
:param motifs_bed: :class:`pybedtools.BedTool` object containing regions
of the TF sited to be plotted.
:type motifs_bed: :class:`pybedtools.BedTool`
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param head_width: Width of the arrow head as proportion of the arrow,
defaults to 0.2
:type head_width: float, optional
:param head_length: Length of the arrow in bp (depends on the region that
is plotted), defaults to 1000.
:type head_length: int, optional
:param overhang: Fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one. Defaults to 0.
:type overhang: float, optional
:param color_plus: Color of plus stranded TF regions, defaults to "#80b1d3".
:type color_plus: str, optional
:param color_minus: Color of minus stranded TF regions, defaults to
"#fb8072".
:type color_minus: str, optional
:param ax: Axis on which to plot contact map, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
for motif in motifs_bed:
motif_start = int(motif[1])
motif_end = int(motif[2])
strand = str(motif[3])
arrow_start = motif_start
arrow_end = motif_end
color=color_plus
dx = head_length
if(strand == "-"):
arrow_start = motif_end
arrow_end = motif_start
color = color_minus
dx = -1.*head_length
plt.arrow(arrow_start,
.5,
dx,
0,
head_width=head_width,
head_length=head_length,
overhang=overhang,
head_starts_at_zero=False,
edgecolor="none",
facecolor=color,
length_includes_head=True)
plt.xlim([start, end])
plt.ylim([0.4, 0.6])
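# Usage sketch (hypothetical motif hits; the fourth field is the strand, and
# any iterable of bed-like rows is accepted since the input is only iterated):
#
#     motifs = [("chr1", 10000, 10019, "+"), ("chr1", 52000, 52019, "-")]
#     plotMotifDirections(motifs, 0, 100000, head_length=2000)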
def plotHiCContactMap(contact_map,
start,
end,
segment_size,
cmap="Greys",
vmin=None,
vmax=None,
location="top",
ax=None):
'''Function that plots HiC contact maps as pyramid plots
:param contact_map: Matrix that contains the intensity values of HiC
contacts.
:type contact_map: :class:`pandas.DataFrame`
:param start: Chromosomal start position of region to be plotted.
:type start: int
:param end: Chromosomal end position of region to be plotted.
:type end: int
:param segment_size: Size of the segments for which contacts were called.
:type segment_size: int
:param cmap: Name of the colormap to be used for plotting HiC intensities,
defaults to "Greys".
:type cmap: str, optional
:param vmin: Minimal value of intensity range to be plotted, defaults to
None
:type vmin: float, optional
:param vmax: Maximal value of intensity range to be plotted, defaults to
None.
:type vmax: float, optional
:param location: Either of "top" | "bottom". If location == "top", the
pyramid points upwards, else if location == "bottom" the pyramid points
downwards, defaults to "top".
:type location: str, optional
:param ax: Axis on which to plot contact map, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
# Convert genomic coordinates to integer bin indices into the contact matrix
contact_map_index1 = start//segment_size
contact_map_index2 = (end//segment_size)+1
sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2,
contact_map_index1:contact_map_index2]
if(vmin is None):
vmin = 0
if(vmax is None):
vmax = np.percentile(contact_map, 99.9)
colormap = plt.get_cmap(cmap)
for i in range(contact_map_index1, contact_map_index2):
y_range = (range(i, contact_map_index2)
if location == "top"
else range(contact_map_index1, i))
for j in y_range:
# Define midpoint of rectangle
midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2.,
(j*segment_size-i*segment_size)/2.)
vertices = [(midpoint[0]-segment_size/2., midpoint[1]),
(midpoint[0], midpoint[1]-segment_size/2.),
(midpoint[0]+segment_size/2., midpoint[1]),
(midpoint[0], midpoint[1]+segment_size/2.),
(midpoint[0]-segment_size/2., midpoint[1])
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(vertices, codes)
intensity_value = contact_map.iloc[i, j]
intensity_value = (intensity_value/vmax
if intensity_value <= vmax
else 1.)
facecolor = colormap(intensity_value)
patch = matplotlib.patches.PathPatch(path,
facecolor=facecolor,
edgecolor='none')
ax.add_patch(patch)
ax.set_xlim(start, end)
if(location == "top"):
ax.set_ylim(0, (end-start)/2.)
else:
ax.set_ylim(-1.*(end-start)/2., 0)
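# Usage sketch (hypothetical 10 kb-binned matrix; assumes pandas is
# importable):
#
#     contacts = pandas.read_csv("contacts.tsv", sep="\t", header=None)
#     plotHiCContactMap(contacts, 1000000, 2000000, 10000, location="top")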
def distanceEqualizer(genomic_segments,
start,
end,
direction="top_down",
color="k",
ax = None):
'''Function that plots arcs from unequal distances of genomic segments to
equal distances.
:param genomic_segments: List of segments for which distances shall be
equalized (each segment is of the form [<chrom>, <start>, <end>])
:type genomic_segments: list
:param start: Start position of the genomic region.
:type start: int
:param end: End position of the genomic region.
:type end: int
:param color: Color of lines equalizing distances, defaults to "k".
:type color: str, optional
:param direction: Direction of distance equalization (top_down | bottom_up),
defaults to "top_down".
:type direction: str, optional.
:param ax: Axis on which to plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: List of equalized region midpoints.
:rtype: list
'''
ax = ax if ax is not None else plt.gca()
# Calculate midpoints of original and distance equalized segments
n_segments = len(genomic_segments)
equalized_region_size = (end-start)
if(n_segments > 0):
equalized_region_size=(end-start)/n_segments
equalized_region_mid_points = []
for i in range(1, n_segments+1):
equalized_region_mid_points += [((start+
i*equalized_region_size)-
equalized_region_size/2)]
region_mid_points = []
for e in genomic_segments:
if(int(e[1]) < start):
region_mid_points += [start+(int(e[2])-start)/2]
elif(int(e[2]) > end):
region_mid_points += [int(e[1])+(end-int(e[1]))/2]
else:
region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2]
for i in range(len(region_mid_points)):
region_mid_point = region_mid_points[i]
equalized_region_mid_point = equalized_region_mid_points[i]
codes = []
vertices = []
if(direction == "top_down"):
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
vertices = [(region_mid_point, 1),
(region_mid_point, .8),
(equalized_region_mid_point, .2),
(equalized_region_mid_point, 0)]
else:
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
vertices = [(region_mid_point, 0),
(region_mid_point, .2),
(equalized_region_mid_point, .8),
(equalized_region_mid_point, 1)]
path = Path(vertices, codes)
path_patch = PathPatch(path,
facecolor="none",
edgecolor=color,
linewidth=.5)
ax.add_patch(path_patch)
ax.axis("off")
plt.xlim([start, end])
plt.ylim([0, 1])
return equalized_region_mid_points
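# Usage sketch (hypothetical segments); the returned midpoints give the
# evenly spaced x positions on the equalized side of the arcs:
#
#     segs = [["chr1", 100, 200], ["chr1", 4900, 5100], ["chr1", 9000, 9050]]
#     mids = distanceEqualizer(segs, 0, 10000)  # -> roughly [1666, 5000, 8333]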
def plotCoordinates(chrom,
start,
end,
color="k",
ax = None,
upper=True,
loc_coordinates="up",
revert_coordinates=False,
rotation=0):
'''Function that plots genomic coordinates in a linear fashion.
:param chrom: Chromosome of the region to be plotted.
:type chrom: str
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param color: Color of the genomic scales elements, defaults to "k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param upper: If True, make fewer ticks, else make more ticks, defaults to
True.
:type upper: bool, optional
:param loc_coordinates: Either of "up" | "down". If "up", plot ticks to
upper direction, else if "down", plot ticks to lower direction, defaults
to "up".
:type loc_coordinates: str, optional
:param revert_coordinates: If True, coordinates are reverted to decreasing
order. Else, coordinates stay in increasing order, defaults to False.
:type revert_coordinates: bool, optional
:param rotation: Rotational angle of coordinate strings, defaults to 0.
:type rotation: int, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
tick_size = 10**math.ceil((np.log10((end-start)/10)))
if(not upper):
tick_size = 10**int((np.log10((end-start)/10)))
# Determine first tick position
first_tick = start+(tick_size-start%tick_size)
ticks = []
current_tick = first_tick
while(current_tick <= end):
ticks += [current_tick]
current_tick = current_tick + tick_size
scale = None
if(first_tick > 1000000):
scale = "Mb"
else:
scale = "Kb"
digits_to_round = None
divisor = None
if(scale == "Mb"):
digits_to_round = int(6-np.log10(tick_size))
divisor = 1000000
else:
# label in kilobases: divide by 1e3 (the original 1e5 mislabeled positions)
digits_to_round = int(3-np.log10(tick_size))
divisor = 1000
tick_labels = [ str(round(i/float(divisor), digits_to_round))+scale
for i in ticks ]
if(loc_coordinates == "up"):
plt.plot([start, end],
[0, 0],
linestyle="-",
color=color,
linewidth=1)
else:
plt.plot([start, end],
[0.3, 0.3],
linestyle="-",
color=color,
linewidth=1)
if(revert_coordinates):
ticks = [ start + end-i for i in ticks ]
ticks.reverse()
tick_labels.reverse()
for i in range(len(ticks)):
if(loc_coordinates == "up"):
plt.plot([ticks[i], ticks[i]],
[0., .3],
linestyle="-",
color=color,
linewidth=1)
plt.text(ticks[i],
.4,
tick_labels[i],
horizontalalignment="center",
verticalalignment="bottom",
fontsize=5,
color=color,
rotation=rotation)
else:
plt.plot([ticks[i], ticks[i]],
[.3, .0],
linestyle="-",
color=color,
linewidth=1)
plt.text(ticks[i],
-.1,
tick_labels[i],
horizontalalignment="center",
fontsize=5,
color=color,
verticalalignment="top",
rotation=rotation)
plt.xlim([start, end])
plt.yticks([], [])
if(loc_coordinates == "up"):
plt.ylim([-.1, .8])
else:
plt.ylim([-1.5, .3])
plt.xticks([], [])
ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
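# Usage sketch (hypothetical window): draw a ruler for chr2:1,000,000-2,000,000
# with ticks pointing downwards.
#
#     plotCoordinates("chr2", 1000000, 2000000, loc_coordinates="down")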
def plotLinksAsArcs(links_bed,
chrom_r,
start_r,
end_r,
lw=1,
color="k",
ax = None):
'''Function that plots links between genomic regions as arcs.
:param links_bed: Iterator, that contains bed-like structured lists with the
following elements:
1. Chromosome region1
2. Start region1
3. End region1
4. Chromosome region2
5. Start region2
6. End region2
:type links_bed: iterator
:param chrom_r: Chromosome of the region to be plotted.
:type chrom_r: str
:param start_r: Chromosomal start position of the region to be plotted.
:type start_r: int
:param end_r: Chromosomal end position of the region to be plotted.
:type end_r: int
:param color: Color of the arc, defaults to "k".
:type color: str, optional.
:param ax: Axis where the plot is drawn, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
max_dist = 0
for e in links_bed:
link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2
link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2
distance = abs(link_pos2-link_pos1)
if(distance > max_dist):
max_dist = distance
mid_point = link_pos1 + (link_pos2-link_pos1)/2
if(link_pos2 < link_pos1):
mid_point = link_pos2 + (link_pos1-link_pos2)/2
vertices = [(link_pos1, 0),
(mid_point, distance),
(link_pos2, 0)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
path = Path(vertices,
codes)
patch = PathPatch(path,
facecolor = "None",
edgecolor = color,
lw = lw)
ax.add_patch(patch)
#ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.xticks([], [])
plt.yticks([], [])
plt.xlim([start_r, end_r])
plt.ylim([0, max_dist/2])
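# Usage sketch (hypothetical paired regions, bedpe-like rows):
#
#     links = [("chr1", 1000, 1100, "chr1", 9000, 9100)]
#     plotLinksAsArcs(links, "chr1", 0, 10000)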
# ==== toc/fsa/fsa.py (repo: djrochford/toc, license: MIT) ====
"""
File containing DFA and NFA public classes
"""
import collections.abc
from itertools import product, chain, combinations
from string import printable
from typing import (
AbstractSet,
Container,
FrozenSet,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
Union,
cast
)
from .base import (
_Base,
_extract_states_alphabet,
_error_message,
_good_alphabet,
_check_input
)
State = str
Symbol = str
Regex = str
FsaTransitionFunction = Mapping[
Tuple[State, Symbol], Union[State, AbstractSet[State]]
]
class _FSA(_Base):
def __init__(
self,
*,
transition_function: FsaTransitionFunction,
start_state: State,
accept_states: AbstractSet[State]
):
super().__init__(
transition_function=transition_function, start_state=start_state
)
self._accept_states = accept_states
self._states, self._alphabet = _extract_states_alphabet(
self._transition_function.keys()
)
self._well_defined()
@property
def alphabet(self) -> FrozenSet[Symbol]:
return self._alphabet
@property
def accept_states(self) -> AbstractSet[State]:
return self._accept_states
def _well_defined(self) -> None:
super()._well_defined()
_good_alphabet(alphabet=self.alphabet, name="alphabet")
self._good_accept()
self._good_domain(self.alphabet)
def _good_accept(self) -> None:
bad_accept_states = self.accept_states - self.states
_error_message(
bad_set=bad_accept_states,
message_singular=("Accept state {} is not a member of the fsa's "
"state set."),
message_plural=("Accept states {} are not members of the fsa's "
"state set.")
)
def _good_range(self):
raise NotImplementedError
GnfaTransitionFunction = Mapping[Tuple[State, State], Regex]
MutableGnfaTF = MutableMapping[Tuple[State, State], Regex]
class _GNFA:
def __init__(
self,
transition_function: GnfaTransitionFunction,
body_states: Set[State],
start_state: State,
accept_state: State
):
self.transition_function = transition_function
self.body_states = body_states
self.start_state = start_state
self.accept_state = accept_state
self.states = (
self.body_states | {self.start_state} | {self.accept_state}
)
def reduce(self) -> "_GNFA":
"""
Output a GNFA equivalent to `self` with one less state in it.
"""
def union_main_scope(regex: Regex) -> bool:
paren_count = 0
for char in regex:
if char == '(':
paren_count += 1
elif char == ')':
paren_count -= 1
elif char == '|':
if paren_count == 0:
return True
return False
def regex_star(regex: Regex) -> Regex:
if regex in EMPTIES:
return '€'
if len(regex) == 1:
return regex + '*'
return f"({regex})*"
def regex_concat(regex1: Regex, regex2: Regex) -> Regex:
if regex1 == 'Ø' or regex2 == 'Ø':
return 'Ø'
if regex1 == '€':
return regex2
if regex2 == '€':
return regex1
if union_main_scope(regex1):
regex1 = f'({regex1})'
if union_main_scope(regex2):
regex2 = f'({regex2})'
return regex1 + regex2
def regex_union(regex1: Regex, regex2: Regex) -> Regex:
if regex1 == "Ø":
return regex2
if regex2 == "Ø":
return regex1
return f"{regex1}|{regex2}"
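# Illustration of the helpers above ('€' is the empty string, 'Ø' the
# empty language):
#   regex_star('ab')         -> '(ab)*'
#   regex_concat('a|b', 'c') -> '(a|b)c'
#   regex_union('Ø', 'ab')   -> 'ab'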
rip = self.body_states.pop()
r2 = self.transition_function[(rip, rip)]
reduced_tf = {}
for state1 in self.states - {self.accept_state, rip}:
r1 = self.transition_function[(state1, rip)]
for state2 in self.states - {self.start_state, rip}:
r3 = self.transition_function[(rip, state2)]
r4 = self.transition_function[(state1, state2)]
new_regex = regex_union(
regex_concat(regex_concat(r1, regex_star(r2)), r3),
r4
)
reduced_tf[(state1, state2)] = new_regex
return _GNFA(
reduced_tf,
self.body_states - {rip},
self.start_state,
self.accept_state
)
NfaTransitionFunction = Mapping[Tuple[State, Symbol], AbstractSet[State]]
MutableNfaTF = MutableMapping[Tuple[State, Symbol], Set[State]]
class NFA(_FSA):
"""
A nondeterministic finite automaton class. Takes three keyword arguments:
- `transition_function`: Mapping[Tuple[State, Symbol], AbstractSet[State]]
- `start_state`: State
- `accept_states`: AbstractSet[State]
(Where States are strings, and Symbols are one-char strings.)
The transition function's keys implicitly define the nfa's state-set and
alphabet; the first elements of the tuples represent the nfa's states, and
the second elements are the symbols in the alphabet.
The range of the transition function is the power-set of the nfa's state
set --- i.e., the values of the transition function dictionary should be
sets (or frozensets). The empty set is a valid value; in fact, you are
required to specify that the successor set for a given state-symbol pair is
the empty set, if it is.
You can define epsilon-moves by using the empty string in place of an
alphabet symbol in the transition function. Note that the empty string will
not be inferred to be a member of the alphabet (and hence the checks below
will work as you would expect).
The class will raise a ValueError exception on instantiation if any of the
following are true:
1. the start state is not a member of the set of states inferred from
the transition function;
2. the set of accept states is not a subset of the set of states
inferred from the transition function;
3. a member of the alphabet inferred from the transition function is
not a one-character string;
4. a member of the transition function's range is not a set;
5. the range of the transition function is not a subset of the power
set of states inferred from the transition function;
6. the transition function is missing cases -- i.e., it is not the case
that every pair of a state and a symbol is in the domain of the
transition function.
The exception message will specify which of these six conditions
triggered the exception, and which states/symbols are the source of the
problem.
"""
def __or__(self, other: "NFA") -> "NFA":
"""
Let A be the language recognised by nfa1, and B be the language
recognized by nfa2. `nfa1 | nfa2` returns an nfa that recognizes A
union B. The cardinality of the state-set of nfa1 | nfa2 is the
cardinality of the state set of nfa1 plus the cardinality of the
state-set of nfa2 plus 1.
There is no problem with the input NFAs having different alphabets.
"""
new_self, new_other, union_tf = self._combine(other)
union_start_state = _get_new_state(new_self.states | new_other.states)
union_tf[(union_start_state, '')] = {
new_self.start_state, new_other.start_state
}
for symbol in new_self.alphabet | new_other.alphabet:
union_tf[(union_start_state, symbol)] = set()
union_accept_states = new_self.accept_states | new_other.accept_states
return NFA(
transition_function=union_tf,
start_state=union_start_state,
accept_states=union_accept_states
)
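# A minimal sketch (hypothetical machines): an nfa over {'a'} accepting
# exactly "a", unioned with one over {'b'} accepting exactly "b".
#
#     nfa_a = NFA(transition_function={('q1', 'a'): {'q2'}, ('q2', 'a'): set()},
#                 start_state='q1', accept_states={'q2'})
#     nfa_b = NFA(transition_function={('q1', 'b'): {'q2'}, ('q2', 'b'): set()},
#                 start_state='q1', accept_states={'q2'})
#     either = nfa_a | nfa_b
#     either.accepts('a'), either.accepts('b')  # -> (True, True)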
def __add__(self, other: "NFA") -> "NFA":
"""
Let A be the language recognised by nfa1, and B be the language
recognized by nfa2. `nfa1 + nfa2` returns an nfa that recognizes A
concat B -- i.e., the language consisting of the set of strings of the
form a concat b, where a is an element of A and b is an element of B.
Note that this `+` operation is not commutative.
"""
new_self, new_other, concat_tf = self._combine(other)
for state in new_self.accept_states:
if (state, '') in concat_tf:
concat_tf[(state, '')].add(new_other.start_state)
else:
concat_tf[(state, '')] = {new_other.start_state}
return NFA(
transition_function=concat_tf,
start_state=new_self.start_state,
accept_states=new_other.accept_states
)
def _combine(self, other: "NFA") -> Tuple["NFA", "NFA", MutableNfaTF]:
def prime(state: State):
return state + '`'
def copy(nfa: NFA) -> NFA:
copy_tf = {}
for state, symbol in nfa.transition_function.keys():
copy_tf[(prime(state), symbol)] = {
prime(x) for x in nfa.transition_function[(state, symbol)]
}
copy_start = prime(nfa.start_state)
copy_accept = {prime(x) for x in nfa.accept_states}
return NFA(
transition_function=copy_tf,
start_state=copy_start,
accept_states=copy_accept
)
overlap = self.states & other.states
while overlap:
other = copy(other)
overlap = self.states & other.states
def add_empty_transitions(
nfa1: NFA, nfa2: NFA
) -> Tuple[NfaTransitionFunction, NfaTransitionFunction]:
def add_one_way(nfa1: NFA, nfa2: NFA) -> NfaTransitionFunction:
new_tf = dict(nfa1.transition_function)  # copy, so the input nfa is not mutated
extra_symbols = nfa2.alphabet - nfa1.alphabet
if extra_symbols:
for pair in product(nfa1.states, extra_symbols):
new_tf[pair] = set()
return new_tf
return add_one_way(nfa1, nfa2), add_one_way(nfa2, nfa1)
self_tf, other_tf = add_empty_transitions(self, other)
new_self = NFA(
transition_function=self_tf,
start_state=self.start_state,
accept_states=self.accept_states
)
new_other = NFA(
transition_function=other_tf,
start_state=other.start_state,
accept_states=other.accept_states
)
combination_tf = {}
combination_tf.update(new_self.transition_function)
combination_tf.update(new_other.transition_function)
return new_self, new_other, combination_tf
def _good_range(self) -> None:
bad_range = {
x for x in self.transition_function.values()
if not isinstance(x, collections.abc.Set)
}
_error_message(
bad_set=bad_range,
message_singular=("Value {} in the range of the transition "
"function is not a set."),
message_plural=("Values {} in the range of the transition "
"function are not sets.")
)
transition_range: Set[Optional[AbstractSet[State]]] = set.union(
*self.transition_function.values()
)
_error_message(
bad_set=transition_range - self.states,
message_singular=("State {} in the range of the transition "
"function is not in the fsa's state set."),
message_plural=("States {} in the range of the transition "
"function are not in the fsa's state set.")
)
def _get_successors(
self, *, state_set: AbstractSet[State], symbol: Symbol
) -> FrozenSet[State]:
def get_successor(state: State, sym: Symbol) -> AbstractSet[State]:
self._transition_function = cast(
NfaTransitionFunction, self._transition_function
)
return self._transition_function.get((state, sym), frozenset())
empty: FrozenSet[State] = frozenset() # This avoids a mypy bug.
return empty.union(
*[frozenset(get_successor(state, symbol)) for state in state_set]
)
def _add_epsilons(self, state_set: AbstractSet[State]) -> FrozenSet[State]:
epsilon_neighbours = self._get_successors(
state_set=state_set, symbol=''
)
while epsilon_neighbours - state_set:
state_set = state_set | epsilon_neighbours
epsilon_neighbours = self._get_successors(
state_set=epsilon_neighbours, symbol=''
)
return frozenset(state_set)
def _transition(self, state_set: AbstractSet[State], symbol: Symbol):
return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol))
def accepts(self, string: str) -> bool:
"""
Determines whether the nfa accepts the input string. Will raise a ValueError
exception if the string contains symbols that aren't in the nfa's
alphabet.
"""
_check_input(string=string, alphabet=self.alphabet)
current_states = self._add_epsilons({self.start_state})
for symbol in string:
current_states = self._transition(current_states, symbol)
return bool(current_states & self.accept_states)
def determinize(self) -> "DFA":
"""Returns a DFA that recognizes the same language as the NFA
instance.
WARNING: The set of DFA states has the cardinality of the power-set of
the set of NFA states. For related reasons, the time complexity of this
method is exponential in the number of states of the NFA. Don't
determinize big NFAs.
"""
# powerset code is an itertools recipe, from
# https://docs.python.org/3/library/itertools.html#recipes
# (minor modification to make the return a set of frozensets).
def powerset(iterable: Iterable) -> Set[FrozenSet]:
s = list(iterable)
return {
frozenset(item) for item in chain.from_iterable(
combinations(s, r) for r in range(len(s)+1)
)
}
state_sets = powerset(self.states)
determinized_tf = {}
determinized_accept = set()
for (state_set, symbol) in product(state_sets, self._alphabet):
determinized_state = _stringify(state_set)
determinized_tf[(determinized_state, symbol)] = _stringify(
self._transition(state_set, symbol)
)
if set(state_set) & self.accept_states:
determinized_accept.add(determinized_state)
determinized_start = _stringify(
self._add_epsilons({self._start_state})
)
return DFA(
transition_function=determinized_tf,
start_state=determinized_start,
accept_states=determinized_accept
)
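# Sketch: determinizing the small union machine from the example above; the
# dfa's states are named by sorted, concatenated nfa state sets.
#
#     dfa = (nfa_a | nfa_b).determinize()
#     dfa.accepts('a')  # -> True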
def star(self) -> "NFA":
"""
Let A be the language recognised by nfa. `nfa.star()` returns an nfa
that recognizes A* -- i.e., the set of all strings formed by
concatenating any number of members of A.
"""
star_start = _get_new_state(self.states)
star_tf = dict(self.transition_function)  # copy, so `self` is not mutated
star_tf[(star_start, '')] = {self.start_state}
for symbol in self.alphabet:
star_tf[(star_start, symbol)] = set()
for state in self.accept_states:
# preserve any pre-existing epsilon-moves out of the accept states
star_tf[(state, '')] = set(star_tf.get((state, ''), set())) | {self.start_state}
star_accepts = self.accept_states | {star_start}
return NFA(
transition_function=star_tf,
start_state=star_start,
accept_states=star_accepts
)
@staticmethod
def fit(
regex: Regex,
alphabet: AbstractSet[Symbol] = (
set(printable) - {'(', ')', '|', '*'}
)
) -> "NFA":
"""
Takes a regular expression and an alphabet (i.e., a set of
one-character strings) as input; returns an NFA that recognises the
language defined by that regular expression and that alphabet.
The alphabet parameter is optional; its default value is
string.printable -- i.e., the set of "printable" characters, which
includes the standard ASCII letters and digits, and most common
punctuation and white space.
Actually, that's not quite right -- the default value is
string.printable *minus* parentheses, the vertical bar and the star
symbol, for reasons that I will explain presently.
As of now, the syntax of the regular expressions that this method takes
as input is very simple -- much simpler than the standard python
regular expressions. All characters are interpreted as literals for
symbols in the alphabet except for '(', ')', '|', '*', '•', '€' and
'Ø'. The parentheses, vertical bar and star mean what you'd expect
them to mean if you are familiar with regular expressions. '•'
(option-8 on a mac keyboard) means concatenation. You can leave
concatenation implicit, as is usual; no need to write '•' explicitly
if you don't want to. But it gets used internally. '€' (option-shift-2)
is used to match the empty string (because it kind of looks like an
epsilon); there's no other way to match, for instance, {'', '0'} with
the current syntax. (Quick challenge: it's not totally obvious how to
match the empty string in normal python regex syntax either, though it
can be done; give it a go.) 'Ø' (option-shift-o) represents the empty
set; you can match to the empty language with it.
For reasons related to the above, the characters '(', ')', '|', '*',
'•', '€' and 'Ø' cannot be symbols in the alphabet of the NFA. (My
apologies to speakers of Scandinavian languages for the last one; I am
very against English chauvinism, but your letter is so very close to
the empty-set symbol. If, by some miracle, there is someone who cares
about this, I will change the symbol for empty-set.)
In the absence of parentheses, the order of operations is: `*`, then
`•`, then `|`.
This method uses a version of Dijkstra's shunting yard algorithm to
parse the regex and build the NFA.
The method will raise a ValueError exception if any of the following
conditions hold:
1. the alphabet contains any of the verboten characters -- i.e.,`(`
, `)`, `|`, `*`, `•`, `€` and `Ø`,
2. the input regex string contains a character not in the alphabet,
and not one of the above verboten characters,
3. the input regex contains a binary operator followed by an
operator, or
4. the input regex does not have properly matching parentheses.
"""
operator_to_operation = {
'|': NFA.__or__,
'•': NFA.__add__
}
_error_message(
bad_set=set(NOT_SYMBOLS) & alphabet,
message_singular="Alphabet cannot contain character {}.",
message_plural="Alphabet cannot contain characters {}."
)
def fit_empty(empty: Regex) -> NFA:
tf: NfaTransitionFunction = {
pair: set() for pair in product({'q1'}, alphabet)
}
accept_states = set() if empty == 'Ø' else {'q1'}
return NFA(
transition_function=tf,
start_state='q1',
accept_states=accept_states
)
def fit_symbol(symbol: Symbol) -> NFA:
tf: MutableNfaTF = {
pair: set() for pair in product({'q1', 'q2'}, alphabet)
}
tf[('q1', symbol)] = {'q2'}
return NFA(
transition_function=tf, start_state='q1', accept_states={'q2'}
)
machine_stack: List[NFA] = []
operator_stack = ['sentinel']
def binary_operate() -> None:
right_operand = machine_stack.pop()
left_operand = machine_stack.pop()
machine = operator_to_operation[operator_stack.pop()](
left_operand, right_operand
)
machine_stack.append(machine)
def compare(operator: Regex) -> int:
return (
OPERATORS.index(operator)
- OPERATORS.index(operator_stack[-1])
)
regex = _pre_process(regex, alphabet)
for char in regex:
if char in EMPTIES:
machine_stack.append(fit_empty(char))
elif char in alphabet:
machine_stack.append(fit_symbol(char))
elif char == '*':
machine_stack[-1] = machine_stack[-1].star()
elif char in OPERATORS:
if operator_stack[-1] in PARENTHE or compare(char) > 0:
operator_stack.append(char)
else:
while (
operator_stack[-1] not in PARENTHE
and compare(char) <= 0
):
binary_operate()
operator_stack.append(char)
elif char == '(':
operator_stack.append(char)
else:
while operator_stack[-1] != '(':
binary_operate()
operator_stack.pop()
while len(operator_stack) > 1:
binary_operate()
return machine_stack.pop()
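# Sketch, exercising the grammar documented above:
#
#     machine = NFA.fit('(a|b)*c', {'a', 'b', 'c'})
#     machine.accepts('abbac')  # -> True
#     machine.accepts('ab')     # -> False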
OPERATORS = ['sentinel', '|', '•', '*']
PARENTHE = ['(', ')']
EMPTIES = ['€', 'Ø']
NOT_SYMBOLS = OPERATORS + PARENTHE + EMPTIES
def _pre_process(regex: Regex, alphabet: AbstractSet[Symbol]) -> Regex:
if not regex:
raise ValueError("Regex cannot be empty.")
first_char = regex[0]
if first_char in OPERATORS:
raise ValueError(f"Regex cannot start with '{first_char}'.")
processed = ''
paren_count = 0
for char in regex:
if char in alphabet or char == '(':
if len(processed) > 0:
processed += (
'•' if processed[-1] not in {'(', '|'}
else ''
)
if char not in alphabet | set(NOT_SYMBOLS):
raise ValueError(
f"Regex contains character '{char}' that is not in "
"alphabet and not an accepted regex character."
)
if char in OPERATORS and processed[-1] in {'|', '•'}:
raise ValueError(
"Regex contains binary operator followed by an "
"operator; not cool."
)
if char == '(':
paren_count += 1
if char == ')':
paren_count -= 1
if paren_count < 0:
raise ValueError(
"Right parenthesis occurs in regex without matching "
"left parenthesis."
)
processed += char
if paren_count > 0:
raise ValueError(
"Left parenthesis occurs in regex without matching right "
"parenthesis."
)
return processed
DfaTransitionFunction = Mapping[Tuple[State, Symbol], State]
class DFA(_FSA):
"""
A deterministic finite automaton class. Takes three keyword arguments:
- `transition_function`: Mapping[Tuple[State, Symbol], State]
- `start_state`: State
- `accept_states`: AbstractSet[State]
(where States are strings and Symbols are one-char strings).
The keys of the `transition_function` implicitly define the dfa's state-set
and alphabet.
The class will raise a ValueError exception on instantiation if any of the
following are true:
* the start state is not a member of the set of states inferred from the
transition function;
* the set of accept states is not a subset of the set of states inferred
from the transition function;
* the range of the transition function is not a subset of the set of
states inferred from the transition function;
* a member of the alphabet inferred from the transition function is not a
one-character string;
* the transition function is missing a case -- i.e., it is not the case
that every pair of a state and a symbol is in the domain of the
transition function.
The exception message will specify which of the above conditions
triggered the exception, and which states/symbols are the source of the
problem.
"""
def __or__(self, other: "DFA") -> "DFA":
"""
Let A be the language recognised by dfa1, and B be the language
recognized by dfa2. `dfa1 | dfa2` returns a dfa that recognizes A union
B. The states of dfa1 | dfa2 are ordered pairs of states from dfa1 and
dfa2. There is no problem with the input DFAs having different
alphabets.
"""
union_alphabet = self.alphabet | other.alphabet
def maybe_add_state(
dfa1: DFA, dfa2: DFA
) -> Tuple[FrozenSet[State], DfaTransitionFunction]:
new_tf = dict(dfa1.transition_function)  # copy, so the input dfa is not mutated
new_states = dfa1.states
extra_symbols = dfa2.alphabet - dfa1.alphabet
if extra_symbols:
error_state = _get_new_state(dfa1.states)
new_states = dfa1.states | {error_state}
for symbol in union_alphabet:
new_tf[(error_state, symbol)] = error_state
for symbol in extra_symbols:
for state in dfa1.states:
new_tf[(state, symbol)] = error_state
return new_states, new_tf
self_states, self_tf = maybe_add_state(self, other)
other_states, other_tf = maybe_add_state(other, self)
state_pairs = product(self_states, other_states)
union_transition_function = {}
for (state1, state2), symbol in product(state_pairs, union_alphabet):
union_transition_function[(state1 + state2, symbol)] = (
self_tf[(state1, symbol)] + other_tf[(state2, symbol)]
)
union_start_state = self.start_state + other.start_state
union_accept_states = {
_stringify(item) for item in (
set(product(self.accept_states, other_states))
| set(product(self_states, other.accept_states))
)
}
return DFA(
transition_function=union_transition_function,
start_state=union_start_state,
accept_states=union_accept_states
)
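# A minimal sketch (hypothetical machines over the alphabet {'a'}):
#
#     even = DFA(transition_function={('e', 'a'): 'o', ('o', 'a'): 'e'},
#                start_state='e', accept_states={'e'})
#     odd = DFA(transition_function={('e', 'a'): 'o', ('o', 'a'): 'e'},
#               start_state='e', accept_states={'o'})
#     (even | odd).accepts('aaa')  # -> True (every string is in the union)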
def __add__(self, other: "DFA") -> "DFA":
"""
Let A be the language recognised by dfa1, B be the language recognised
by dfa2. `dfa1 + dfa2` returns a DFA that recognises the set of all
concatenations of strings in A with strings in B. This DFA operator is
parasitic on the NFA operator; it converts the input DFAs into NFAs,
uses the NFA '+', then converts the result back to a DFA. That makes
for a relatively simple but, sadly, computationally expensive algorithm.
For that reason, I recommend you don't `+` dfas with large numbers of
states.
"""
return (self.non_determinize() + other.non_determinize()).determinize()
def _gnfize(self) -> _GNFA:
gnfa_tf: MutableGnfaTF = {}
for state1, symbol in self.transition_function.keys():
state2 = self.transition_function[(state1, symbol)]
if (state1, state2) in gnfa_tf.keys():
gnfa_tf[(state1, state2)] += '|' + symbol
else:
gnfa_tf[(state1, state2)] = symbol
gnfa_start = _get_new_state(self.states)
gnfa_accept = _get_new_state(self.states | {gnfa_start})
gnfa_tf[(gnfa_start, self.start_state)] = '€'
for state in self.accept_states:
gnfa_tf[(state, gnfa_accept)] = '€'
for state1, state2 in product(
self.states | {gnfa_start}, self.states | {gnfa_accept}
):
if (state1, state2) not in gnfa_tf:
gnfa_tf[(state1, state2)] = 'Ø'
return _GNFA(gnfa_tf, set(self.states), gnfa_start, gnfa_accept)
def _good_range(self) -> None:
transition_range = set(self.transition_function.values())
bad_range = transition_range - self.states
_error_message(
bad_set=bad_range,
message_singular=("State {} in the range of the transition "
"function is not in the fsa's state set."),
message_plural=("States {} in the range of the transition "
"function are not in the fsa's state set.")
)
def accepts(self, string: str) -> bool:
"""
`my_dfa.accepts("some string")` returns `True` if my_dfa accepts "some
string", and `False` otherwise. Will raise a ValueError exception if
the string contains symbols that aren't in the DFA's alphabet.
"""
_check_input(string=string, alphabet=self.alphabet)
current_state = self.start_state
for symbol in string:
current_state = self.transition_function[(current_state, symbol)]
return current_state in self.accept_states
def encode(self) -> Regex:
"""
Let A be the language accepted by dfa. `dfa.encode()` returns a regex
string that generates A. That regex string is liable to be much more
complicated than necessary; maybe I'll figure out how to improve on
average simplicity, eventually.
"""
gnfa = self._gnfize()
while len(gnfa.states) > 2:
gnfa = gnfa.reduce()
return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)]
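# Sketch: round-tripping the hypothetical `even` machine from the example
# above; the regex is correct but typically far from minimal.
#
#     regex = even.encode()
#     NFA.fit(regex, {'a'}).accepts('aa')  # -> (expected) True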
def non_determinize(self) -> NFA:
"""
Convenience method that takes a DFA instance and returns an NFA
instance.
"""
nd_transition_function = {
key: {value} for key, value in self.transition_function.items()
}
return NFA(
transition_function=nd_transition_function,
start_state=self.start_state,
accept_states=self.accept_states
)
def _stringify(states: Iterable[State]) -> str:
if not isinstance(states, collections.abc.Sequence):
states = list(states)
states.sort()
return "".join(states)
def _get_new_state(state_set: Container) -> State:
counter = 1
new_state = 'new_state1'
while new_state in state_set:
counter += 1
new_state = "new_state" + str(counter)
return new_state
# ==== oregano_plugins/fusion/server.py (repo: MrNaif2018/Oregano, license: MIT) ====
#!/usr/bin/env python3
#
# Oregano - a lightweight Ergon client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A basic server implementation for CashFusion. Does not natively offer SSL
support, however a server admin may run an SSL server proxy such as nginx for
that purpose.
"""
import secrets
import sys
import threading
import time
import traceback
from collections import defaultdict
import oregano.schnorr as schnorr
from oregano.address import Address
from oregano.util import PrintError, ServerError, TimeoutException
from . import fusion_pb2 as pb
from .comms import send_pb, recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash
from .protocol import Protocol
from .util import (FusionError, sha256, calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components,
rand_position)
from .validation import (check_playercommit, check_covert_component, validate_blame, ValidationError,
check_input_electrumx)
# Resistor "E series" values -- round numbers that are almost geometrically uniform
E6 = [1.0, 1.5, 2.2, 3.3, 4.7, 6.8]
E12 = [1.0, 1.2, 1.5, 1.8, 2.2, 2.7, 3.3, 3.9, 4.7, 5.6, 6.8, 8.2]
E24 = [1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2, 2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1, 5.6, 6.2, 6.8, 7.5, 8.2, 9.1]
# TODO - make these configurable
class Params:
num_components = 23
component_feerate = 1000 # sats/kB
max_excess_fee = 300000 # sats
tiers = [round(b*s) for b in [10000, 100000, 1000000, 10000000, 100000000] for s in E12]
# How many clients do we want before starting a fusion?
min_clients = 8
# If all clients submitted largest possible component (uncompressed p2pkh input), how many could we take until the result would exceed 100 kB standard tx size limitation?
max_clients = (100000 - 12) // (num_components * 173)
# Every round, clients leave ... How many clients do we need as an absolute minimum (for privacy)?
min_safe_clients = 6
# Choose the minimum excess fee based on dividing the overhead amongst players, in the smallest fusion
# (these overhead numbers assume op_return script size of 1 + 5 (lokad) + 33 (session hash) )
if min_safe_clients * num_components >= 2 * 0xfc:
# the smallest fusion could require 3-byte varint for both inputs and outputs lists
overhead = 62
elif min_safe_clients * num_components >= 0xfc:
# the smallest fusion could require 3-byte varint for either inputs or outputs lists
overhead = 60
else:
# the smallest fusion will use 1-byte varint for both inputs and outputs lists
overhead = 58
min_excess_fee = (overhead + min_safe_clients - 1) // min_safe_clients
# How many clients can share same tag on a given tier (if more try to join, reject)
max_tier_client_tags = 100
# For a given IP, how many players can they represent in the same fuse?
ip_max_simul_fuse = 3
# Guaranteed time to launch a fusion if the pool has stayed at or above min_clients for this long.
start_time_max = 1200
# Inter-fusion delay -- after starting any fusion, wait this long before starting the next one (unless hit max time or pool is full).
start_time_spacing = 120
# But don't start a fusion if it has only been above min_clients for a short time (unless pool is full).
start_time_min = 400
# whether to print a lot of logs
noisy = False
# How long covert connections are allowed to stay open without activity.
# note this needs to consider the maximum interval between messages:
# - how long from first connection to last possible Tor component submission?
# - how long from one round's component submission to the next round's component submission?
COVERT_CLIENT_TIMEOUT = 40
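# For reference, with the constants above:
#   max_clients == (100000 - 12) // (23 * 173) == 25
#   the lowest tiers are [round(10000*s) for s in E12]
#     == [10000, 12000, 15000, 18000, 22000, 27000,
#         33000, 39000, 47000, 56000, 68000, 82000]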
# used for non-cryptographic purposes
import random
rng = random.Random()
rng.seed(secrets.token_bytes(32))
def clientjob_send(client, msg, timeout = Protocol.STANDARD_TIMEOUT):
client.send(msg, timeout=timeout)
def clientjob_goodbye(client, text):
# a gentler goodbye than killing
if text is not None:
client.send_error(text)
raise client.Disconnect
class ClientThread(ClientHandlerThread):
"""Basic thread per connected client."""
def recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT):
submsg, mtype = recv_pb(self.connection, pb.ClientMessage, *expected_msg_names, timeout=timeout)
return submsg
def send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT):
send_pb(self.connection, pb.ServerMessage, submsg, timeout=timeout)
def send_error(self, msg):
self.send(pb.Error(message = msg), timeout=Protocol.STANDARD_TIMEOUT)
def error(self, msg):
self.send_error(msg)
raise FusionError(f'Rejected client: {msg}')
class ClientTag(bytes):
""" enhanced bytes object to represent a pool tag """
__slots__ = ()
def __new__(cls, ipstr, tagbytes, maxsimul):
ipb = ipstr.encode()
b = bytes([maxsimul, len(ipb)]) + ipb + tagbytes
return super().__new__(cls, b)
@property
def maxsimul(self):
return self[0]
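# Byte-layout sketch (hypothetical values): ClientTag('1.2.3.4', b'xyz', 3)
# packs to bytes([3, 7]) + b'1.2.3.4' + b'xyz', i.e. maxsimul, ip length,
# ip string, then the client-chosen tag bytes.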
class TagStatus:
__slots__ = ('pool', 'all_')
def __init__(self):
self.pool = 0
self.all_ = 0
class WaitingPool:
""" a waiting pool for a specific tier """
def __init__(self, fill_threshold, tag_max):
self.pool = set() # clients who will be put into fusion round if started at this tier
self.queue = list() # clients who are waiting due to tags being full
self.tags = defaultdict(TagStatus) # how are the various tags
self.fill_threshold = fill_threshold # minimum number of pool clients to trigger setting fill_time
self.fill_time = None # when did pool exceed fill_threshold
self.tag_max = tag_max # how many clients can share same tag (in pool and queue)
def check_add(self, client):
for t in client.tags:
ts = self.tags.get(t)
if ts is not None and ts.all_ >= self.tag_max:
return "too many clients with same tag"
def _add_pool(self, client):
self.pool.add(client)
for t in client.tags:
ts = self.tags[t]
ts.pool += 1
if len(self.pool) == self.fill_threshold:
self.fill_time = time.monotonic()
def add(self, client):
can_pool = True
for t in client.tags:
ts = self.tags[t]
ts.all_ += 1
if ts.pool >= t.maxsimul:
can_pool = False
if can_pool:
self._add_pool(client)
else:
self.queue.append(client)
return can_pool
def remove(self, client):
# make sure to call try_move_from_queue() after calling this
try:
self.pool.remove(client)
except KeyError:
in_pool = False
try:
self.queue.remove(client)
except ValueError:
return False
else:
in_pool = True
if len(self.pool) < self.fill_threshold:
self.fill_time = None
for t in client.tags:
ts = self.tags[t]
ts.all_ -= 1
if in_pool:
ts.pool -= 1
if ts.all_ == 0: # cleanup for no-longer-used tags
del self.tags[t]
return True
def try_move_from_queue(self):
# attempt to move clients from queue into pool
moved = []
for client in self.queue:
for t in client.tags:
ts = self.tags[t]
if ts.pool >= t.maxsimul:
break
else:
self._add_pool(client)
moved.append(client)
for client in moved:
self.queue.remove(client)
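# Usage sketch (hypothetical `client` objects carrying a `tags` list):
#
#     pool = WaitingPool(fill_threshold=8, tag_max=100)
#     if pool.check_add(client) is None:
#         pool.add(client)  # pooled, or queued if a tag is saturated
#     ...
#     if pool.remove(client):
#         pool.try_move_from_queue()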
class FusionServer(GenericServer):
"""Server for clients waiting to start a fusion. New clients get a
ClientThread made for them, and they are put into the waiting pools.
Once a Fusion thread is started, the ClientThreads are passed over to
a FusionController to run the rounds."""
def __init__(self, config, network, bindhost, port, upnp = None, announcehost = None, donation_address = None):
assert network
assert isinstance(donation_address, (Address, type(None)))
if not schnorr.has_fast_sign() or not schnorr.has_fast_verify():
raise RuntimeError("Fusion requires libsecp256k1")
super().__init__(bindhost, port, ClientThread, upnp = upnp)
self.config = config
self.network = network
self.announcehost = announcehost
self.donation_address = donation_address
self.waiting_pools = {t: WaitingPool(Params.min_clients, Params.max_tier_client_tags) for t in Params.tiers}
self.t_last_fuse = time.monotonic() # when the last fuse happened; as a placeholder, set this to startup time.
self.reset_timer()
def run(self):
try:
super().run()
finally:
self.waiting_pools.clear() # gc clean
def reset_timer(self, ):
""" Scan pools for the favoured fuse:
- Out of the pool(s) with the most number of players,
- Choose the pool with the earliest fill time;
- If no pools are filled then there is no favoured fuse.
(since fill time is a float, this will almost always be unique)
"""
with self.lock:
time_best = None
tier_best = None
size_best = 0
for t, pool in self.waiting_pools.items():
ft = pool.fill_time
if ft is None:
continue
size = len(pool.pool)
if size >= size_best:
if time_best is None or ft < time_best or size > size_best:
time_best = ft
tier_best = t
size_best = size
if time_best is None:
self.tier_best_starttime = None
else:
self.tier_best_starttime = max(time_best + Params.start_time_min, self.t_last_fuse + Params.start_time_spacing)
self.tier_best = tier_best
def start_fuse(self, tier):
""" Immediately launch Fusion at the selected tier. """
with self.lock:
chosen_clients = list(self.waiting_pools[tier].pool)
# Notify that we will start.
for c in chosen_clients:
c.start_ev.set()
# Remove those clients from all pools
for t, pool in self.waiting_pools.items():
for c in chosen_clients:
pool.remove(c)
pool.try_move_from_queue()
# Update timing info
self.t_last_fuse = time.monotonic()
self.reset_timer()
# Uncomment the following to: Remove from spawned clients list, so that the fusion can continue independently of waiting server.
# self.spawned_clients.difference_update(chosen_clients)
# Kick off the fusion.
rng.shuffle(chosen_clients)
fusion = FusionController(self.network, tier, chosen_clients, self.bindhost, upnp = self.upnp, announcehost = self.announcehost)
fusion.start()
return len(chosen_clients)
def new_client_job(self, client):
client_ip = client.connection.socket.getpeername()[0]
msg = client.recv('clienthello')
if msg.version != Protocol.VERSION:
client.error("Mismatched protocol version, please upgrade")
if msg.genesis_hash:
if msg.genesis_hash != get_current_genesis_hash():
# For now, msg.genesis_hash is optional and we tolerate it
# missing. However, if the client declares the genesis_hash, we
# do indeed disallow them connecting if they are e.g. on testnet
# and we are mainnet, etc.
client.error("This server is on a different chain, please switch servers")
else:
client.print_error("👀 No genesis hash declared by client, we'll let them slide...")
if self.stopping:
return
donation_address = ''
if isinstance(self.donation_address, Address):
donation_address = self.donation_address.to_full_ui_string()
client.send(pb.ServerHello( num_components = Params.num_components,
component_feerate = Params.component_feerate,
min_excess_fee = Params.min_excess_fee,
max_excess_fee = Params.max_excess_fee,
tiers = Params.tiers,
donation_address = donation_address
))
# We allow a long timeout for clients to choose their pool.
msg = client.recv('joinpools', timeout=120)
if len(msg.tiers) == 0:
client.error("No tiers")
if len(msg.tags) > 5:
client.error("Too many tags")
# Event for signalling us that a pool started.
start_ev = threading.Event()
client.start_ev = start_ev
if client_ip.startswith('127.'):
# localhost is whitelisted to allow unlimited access
client.tags = []
else:
# Default tag: this IP cannot be present in too many fuses.
client.tags = [ClientTag(client_ip, b'', Params.ip_max_simul_fuse)]
for tag in msg.tags:
if len(tag.id) > 20:
client.error("Tag id too long")
if not (0 < tag.limit < 6):
client.error("Tag limit out of range")
ip = '' if tag.no_ip else client_ip
client.tags.append(ClientTag(ip, tag.id, tag.limit))
try:
mytierpools = {t: self.waiting_pools[t] for t in msg.tiers}
except KeyError as e:
# the comprehension variable doesn't leak in Python 3, so report the
# offending tier via the exception instead
if self.stopping:
return
client.error(f"Invalid tier selected: {e.args[0]}")
try:
mytiers = list(mytierpools)
rng.shuffle(mytiers) # shuffle the adding order so that if filling more than one pool, we don't have bias towards any particular tier
with self.lock:
if self.stopping:
return
# add this client to waiting pools
for pool in mytierpools.values():
res = pool.check_add(client)
if res is not None:
client.error(res)
for t in mytiers:
pool = mytierpools[t]
pool.add(client)
if len(pool.pool) >= Params.max_clients:
# pool filled up to the maximum size, so start immediately
self.start_fuse(t)
return
# we have added to pools, which may have changed the favoured tier
self.reset_timer()
inftime = float('inf')
while True:
with self.lock:
if self.stopping or start_ev.is_set():
return
tnow = time.monotonic()
# scan through tiers and collect statuses, also check start times.
statuses = dict()
tfill_thresh = tnow - Params.start_time_max
for t, pool in mytierpools.items():
if client not in pool.pool:
continue
status = pb.TierStatusUpdate.TierStatus(players = len(pool.pool), min_players = Params.min_clients)
remtime = inftime
if pool.fill_time is not None:
# a non-favoured pool will start eventually
remtime = pool.fill_time - tfill_thresh
if t == self.tier_best:
# this is the favoured pool, can start at a special time
remtime = min(remtime, self.tier_best_starttime - tnow)
if remtime <= 0:
self.start_fuse(t)
return
elif remtime != inftime:
status.time_remaining = round(remtime)
statuses[t] = status
client.send(pb.TierStatusUpdate(statuses = statuses))
start_ev.wait(2)
except:
# Remove client from waiting pools on failure (on success, we are already removed; on stop we don't care.)
with self.lock:
for t, pool in mytierpools.items():
if pool.remove(client):
pool.try_move_from_queue()
if self.tier_best in mytierpools:
# we left from best pool, so it might not be best anymore.
self.reset_timer()
raise
class ResultsCollector:
# Collect submissions from different sources, with a deadline.
def __init__(self, num_results, done_on_fail = True):
self.num_results = int(num_results)
self.done_on_fail = bool(done_on_fail)
self.done_ev = threading.Event()
self.lock = threading.Lock()
self.results = []
self.fails = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
self.fails.append(exc_value)
if self.done_on_fail:
self.done_ev.set()
elif len(self.fails) + len(getattr(self, 'results', ())) >= self.num_results:
self.done_ev.set()
def gather(self, *, deadline):
remtime = deadline - time.monotonic()
self.done_ev.wait(max(0., remtime))
with self.lock:
ret = self.results
del self.results
return ret
def add(self, result):
with self.lock:
try:
self.results.append(result)
except AttributeError:
return False
else:
if len(self.fails) + len(self.results) >= self.num_results:
self.done_ev.set()
return True
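# A minimal usage sketch of ResultsCollector (illustrative only; the worker
# logic below is hypothetical). Each worker enters the collector as a context
# manager so that an exception counts as a failure, and the owner gathers
# whatever arrived by the deadline:
#
#   collector = ResultsCollector(num_results=3, done_on_fail=False)
#   def worker(i):
#       with collector:
#           collector.add(i * i)  # record one successful result
#   for i in range(3):
#       threading.Thread(target=worker, args=(i,), daemon=True).start()
#   results = collector.gather(deadline=time.monotonic() + 5.0)
#   # `results` holds the values added before completion or the deadline.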
class FusionController(threading.Thread, PrintError):
""" This controls the Fusion rounds running from server side. """
def __init__(self, network, tier, clients, bindhost, upnp = None, announcehost = None):
super().__init__(name="FusionController")
self.network = network
self.tier = tier
self.clients = list(clients)
self.bindhost = bindhost
self.upnp = upnp
self.announcehost = announcehost
self.daemon = True
def sendall(self, msg, timeout = Protocol.STANDARD_TIMEOUT):
for client in self.clients:
client.addjob(clientjob_send, msg, timeout)
def check_client_count(self):
live = [c for c in self.clients if not c.dead]
if len(live) < Params.min_safe_clients:
for c in live:
c.kill("too few remaining live players")
raise FusionError("too few remaining live players")
def run(self):
self.print_error(f'Starting fusion with {len(self.clients)} players at tier={self.tier}')
covert_server = CovertServer(self.bindhost, upnp = self.upnp)
try:
annhost = covert_server.host if self.announcehost is None else self.announcehost
annhost_b = annhost.encode('ascii')
annport = covert_server.port
covert_server.noisy = Params.noisy
covert_server.start()
self.print_error(f'Covert server started @ {covert_server.host}:{covert_server.port} (announcing as: {annhost_b}:{annport})')
begin_time = round(time.time())
self.sendall(pb.FusionBegin(tier = self.tier,
covert_domain = annhost_b,
covert_port = annport,
covert_ssl = False,
server_time = begin_time))
self.last_hash = calc_initial_hash(self.tier, annhost_b, annport, False, begin_time)
time.sleep(Protocol.WARMUP_TIME)
# repeatedly run rounds until successful or exception
while True:
covert_server.reset()
# Clean up dead clients
self.clients = [c for c in self.clients if not c.dead]
self.check_client_count()
if self.run_round(covert_server):
break
self.print_error('Ended successfully!')
except FusionError as e:
self.print_error(f"Ended with error: {e}")
except Exception as e:
self.print_error('Failed with exception!')
traceback.print_exc(file=sys.stderr)
for c in self.clients:
c.addjob(clientjob_goodbye, 'internal server error')
finally:
covert_server.stop()
for c in self.clients:
c.addjob(clientjob_goodbye, None)
self.clients = [] # gc
def kick_missing_clients(self, goodclients, reason = None):
baddies = set(self.clients).difference(goodclients)
for c in baddies:
c.kill(reason)
def run_round(self, covert_server):
covert_priv, covert_Upub, covert_Cpub = gen_keypair()
round_pubkey = covert_Cpub
# start to accept covert components
covert_server.start_components(round_pubkey, Params.component_feerate)
# generate blind nonces (slow!)
for c in self.clients:
c.blinds = [schnorr.BlindSigner() for _co in range(Params.num_components)]
lock = threading.Lock()
seen_salthashes = set()
# Send start message to players; record the time we did this
round_time = round(time.time())
collector = ResultsCollector(len(self.clients), done_on_fail = False)
def client_start(c, collector):
with collector:
c.send(pb.StartRound(round_pubkey = round_pubkey,
blind_nonce_points = [b.get_R() for b in c.blinds],
server_time = round_time
))
msg = c.recv('playercommit')
commit_messages = check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee, Params.num_components)
newhashes = set(m.salted_component_hash for m in commit_messages)
with lock:
expected_len = len(seen_salthashes) + len(newhashes)
seen_salthashes.update(newhashes)
if len(seen_salthashes) != expected_len:
c.error('duplicate component commitment')
if not collector.add((c, msg.initial_commitments, msg.excess_fee)):
c.error("late commitment")
# record for later
c.blind_sig_requests = msg.blind_sig_requests
c.random_number_commitment = msg.random_number_commitment
for client in self.clients:
client.addjob(client_start, collector)
# Record the time that we sent 'startround' message to players; this
# will form the basis of our covert timeline.
covert_T0 = time.monotonic()
self.print_error(f"startround sent at {time.time()}; accepting covert components")
# Await commitment messages then process results
results = collector.gather(deadline = covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS)
# Filter clients who didn't manage to give a good commitment.
prev_client_count = len(self.clients)
self.clients = [c for c, _, _ in results]
self.check_client_count()
self.print_error(f"got commitments from {len(self.clients)} clients (dropped {prev_client_count - len(self.clients)})")
total_excess_fees = sum(f for _,_,f in results)
# Generate scrambled commitment list, but remember exactly where each commitment originated.
commitment_master_list = [(commit, ci, cj) for ci, (_, commitments, _) in enumerate(results) for cj,commit in enumerate(commitments)]
rng.shuffle(commitment_master_list)
all_commitments = tuple(commit for commit,ci,cj in commitment_master_list)
# Send blind signatures
for c in self.clients:
scalars = [b.sign(covert_priv, e) for b,e in zip(c.blinds, c.blind_sig_requests)]
c.addjob(clientjob_send, pb.BlindSigResponses(scalars = scalars))
del c.blinds, c.blind_sig_requests
del results, collector
# Sleep a bit before uploading commitments, as clients are doing this.
remtime = covert_T0 + Protocol.T_START_COMPS - time.monotonic()
if remtime > 0:
time.sleep(remtime)
# Upload the full commitment list; we're a bit generous with the timeout but that's OK.
self.sendall(pb.AllCommitments(initial_commitments = all_commitments),
timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES)
# Sleep until end of covert components phase
remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic()
assert remtime > 0, "timings set up incorrectly"
time.sleep(remtime)
component_master_list = list(covert_server.end_components().items())
self.print_error(f"ending covert component acceptance. {len(component_master_list)} received.")
# Sort the components & contribs list, then separate it out.
component_master_list.sort(key=lambda x:x[1][0])
all_components = [comp for comp, (sort_key, contrib) in component_master_list]
component_contribs = [contrib for comp, (sort_key, contrib) in component_master_list]
del component_master_list
# Do some preliminary checks to see whether we should just skip the
# signing phase and go directly to blame, or maybe even restart / end
# without sharing components.
skip_signatures = False
if len(all_components) != len(self.clients)*Params.num_components:
skip_signatures = True
self.print_error("problem detected: too few components submitted")
if total_excess_fees != sum(component_contribs):
skip_signatures = True
self.print_error("problem detected: excess fee mismatch")
self.last_hash = session_hash = calc_round_hash(self.last_hash, round_pubkey, round_time, all_commitments, all_components)
# TODO: Check the inputs and outputs to see if we even have reasonable
# privacy with what we have.
bad_components = set()
###
if skip_signatures:
self.print_error("skipping covert signature acceptance")
self.sendall(pb.ShareCovertComponents(components = all_components, skip_signatures = True))
else:
self.print_error("starting covert signature acceptance")
tx, input_indices = tx_from_components(all_components, session_hash)
sighashes = [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache = True))))
for i in range(len(tx.inputs()))]
pubkeys = [bytes.fromhex(inp['pubkeys'][0]) for inp in tx.inputs()]
covert_server.start_signatures(sighashes, pubkeys)
self.sendall(pb.ShareCovertComponents(components = all_components, session_hash = session_hash))
# Sleep until end of covert signatures phase
remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic()
if remtime < 0:
# really shouldn't happen, we had plenty of time
raise FusionError("way too slow")
time.sleep(remtime)
signatures = list(covert_server.end_signatures())
missing_sigs = len([s for s in signatures if s is None])
###
self.print_error(f"ending covert signature acceptance. {missing_sigs} missing :{'(' if missing_sigs else ')'}")
# mark all missing-signature components as bad.
bad_inputs = set(i for i,sig in enumerate(signatures) if sig is None)
# further, search for duplicated inputs (through matching the prevout and claimed pubkey).
prevout_spenders = defaultdict(list)
for i, inp in enumerate(tx.inputs()):
prevout_spenders[f"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}"].append(i)
for prevout, spenders in prevout_spenders.items():
if len(spenders) == 1:
continue
self.print_error(f"multi-spend of f{prevout} detected")
# If exactly one of the inputs is signed, we don't punish its owner:
# they are the honest player, and the other components were just
# imposters submitted without the private key. If more than one is
# signed, then it's malicious behaviour!
if sum((signatures[i] is not None) for i in spenders) != 1:
bad_inputs.update(spenders)
if bad_inputs:
bad_components.update(input_indices[i] for i in bad_inputs)
else:
for i, (inp, sig) in enumerate(zip(tx.inputs(), signatures)):
inp['signatures'][0] = sig.hex() + '41'
assert tx.is_complete()
txid = tx.txid()
self.print_error("completed the transaction! " + txid)
try:
self.network.broadcast_transaction2(tx, timeout=3)
except ServerError as e:
nice_msg, = e.args
server_msg = e.server_msg
self.print_error(f"could not broadcast the transaction! {nice_msg}")
except TimeoutException:
self.print_error("timed out while trying to broadcast transaction! misconfigured?")
# This probably indicates misconfiguration, since the fusion server
# ought to have a good connection to the EC server. Report this back
# to clients as an 'internal server error'.
raise
else:
self.print_error("broadcast was successful!")
# Give our transaction a small head start in relaying, before sharing the
# signatures. This makes it slightly harder for one of the players to
# broadcast a malleated version by re-signing one of their inputs.
time.sleep(2)
self.sendall(pb.FusionResult(ok = True, txsignatures = signatures))
return True
self.sendall(pb.FusionResult(ok = False, bad_components = sorted(bad_components)))
###
self.print_error(f"entering blame phase. bad components: {bad_components}")
if len(self.clients) < 2:
# Sanity check for testing -- the proof sharing thing doesn't even make sense with one player.
for c in self.clients:
c.kill('blame yourself!')
return
# scan the commitment list and note where each client's commitments ended up
client_commit_indexes = [[None]*Params.num_components for _ in self.clients]
for i, (commit, ci, cj) in enumerate(commitment_master_list):
client_commit_indexes[ci][cj] = i
collector = ResultsCollector(len(self.clients), done_on_fail = False)
def client_get_proofs(client, collector):
with collector:
msg = client.recv('myproofslist')
seed = msg.random_number
if sha256(seed) != client.random_number_commitment:
client.error("seed did not match commitment")
proofs = msg.encrypted_proofs
if len(proofs) != Params.num_components:
client.error("wrong number of proofs")
if any(len(p) > 200 for p in proofs):
client.error("too-long proof") # they should only be 129 bytes long.
# generate the possible destinations list (all commitments, but leaving out the originating client's commitments).
myindex = self.clients.index(client)
possible_commitment_destinations = [(ci,cj) for commit, ci, cj in commitment_master_list if ci != myindex]
N = len(possible_commitment_destinations)
assert N == len(all_commitments) - Params.num_components
# calculate the randomly chosen destinations, same way as client did.
relays = []
for i, proof in enumerate(proofs):
dest_client_idx, dest_key_idx = possible_commitment_destinations[rand_position(seed, N, i)]
src_commitment_idx = client_commit_indexes[myindex][i]
relays.append((proof, src_commitment_idx, dest_client_idx, dest_key_idx))
if not collector.add((client, relays)):
client.error("late proofs")
for client in self.clients:
client.addjob(client_get_proofs, collector)
results = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT)
# Now, repackage the proofs according to destination.
proofs_to_relay = [list() for _ in self.clients]
for src_client, relays in results:
for proof, src_commitment_idx, dest_client_idx, dest_key_idx in relays:
proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx, src_client))
live_clients = len(results)
collector = ResultsCollector(live_clients, done_on_fail = False)
def client_get_blames(client, myindex, proofs, collector):
with collector:
# an in-place sort by source commitment idx removes ordering correlations about which client sent which proof
proofs.sort(key = lambda x:x[1])
client.send(pb.TheirProofsList(proofs = [
dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z)
for x,y,z, _ in proofs]))
msg = client.recv('blames', timeout = Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME)
# More than one blame per proof is malicious. Boot client
# immediately since client may be trying to DoS us by
# making us check many inputs against blockchain.
if len(msg.blames) > len(proofs):
client.error('too many blames')
if len(set(blame.which_proof for blame in msg.blames)) != len(msg.blames):
client.error('multiple blames point to same proof')
# Note, the rest of this function might run for a while if many
# checks against blockchain need to be done, perhaps even still
# running after run_round has exited. For this reason we try to
# not reference self.<variables> that may change.
for blame in msg.blames:
try:
encproof, src_commitment_idx, dest_key_idx, src_client = proofs[blame.which_proof]
except IndexError:
client.kill(f'bad proof index {blame.which_proof} / {len(proofs)}')
continue
src_commit_blob, src_commit_client_idx, _ = commitment_master_list[src_commitment_idx]
dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]]
try:
ret = validate_blame(blame, encproof, src_commit_blob, dest_commit_blob, all_components, bad_components, Params.component_feerate)
except ValidationError as e:
self.print_error("got bad blame; clamed reason was: "+repr(blame.blame_reason))
client.kill(f'bad blame message: {e} (you claimed: {blame.blame_reason!r})')
continue
if isinstance(ret, str):
self.print_error(f"verified a bad proof (for {src_commitment_idx}): {ret}")
src_client.kill(f'bad proof (for {src_commitment_idx}): {ret}')
continue
if src_client.dead:
# If the blamed client is already dead, don't waste more time.
# Since nothing after this point can report back to the
# verifier, there is no privacy leak from the omission.
continue
assert ret, 'expecting input component'
outpoint = ret.prev_txid[::-1].hex() + ':' + str(ret.prev_index)
try:
check_input_electrumx(self.network, ret)
except ValidationError as e:
reason = f'{e.args[0]} ({outpoint})'
self.print_error(f"blaming[{src_commitment_idx}] for bad input: {reason}")
src_client.kill('you provided a bad input: ' + reason)
continue
except Exception as e:
self.print_error(f"player indicated bad input but checking failed with exception {repr(e)} ({outpoint})")
else:
self.print_error(f"player indicated bad input but it was fine ({outpoint})")
# At this point we could blame the originator, however
# blockchain checks are somewhat subjective. It would be
# appropriate to add some 'ban score' to the player.
# we aren't collecting any results, rather just marking that
# 'checking finished' so that if all blames are checked, we
# can start next round right away.
collector.add(None)
for idx, (client, proofs) in enumerate(zip(self.clients, proofs_to_relay)):
client.addjob(client_get_blames, idx, proofs, collector)
_ = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME * 2)
self.sendall(pb.RestartRound())
class CovertClientThread(ClientHandlerThread):
def recv(self, *expected_msg_names, timeout=None):
submsg, mtype = recv_pb(self.connection, pb.CovertMessage, *expected_msg_names, timeout=timeout)
return submsg, mtype
def send(self, submsg, timeout=None):
send_pb(self.connection, pb.CovertResponse, submsg, timeout=timeout)
def send_ok(self):
self.send(pb.OK(), timeout=5)
def send_error(self, msg):
self.send(pb.Error(message = msg), timeout=5)
def error(self, msg):
self.send_error(msg)
raise FusionError(f'Rejected client: {msg}')
class CovertServer(GenericServer):
"""
Server for covert submissions. How it works:
- Launch the server at any time. By default, will bind to an ephemeral port.
- Before start of covert components phase, call start_components.
- To signal the end of covert components phase, owner calls end_components, which returns a dict of {component: (sort_key, contrib)}, where contrib is (+- amount - fee).
- Before start of covert signatures phase, owner calls start_signatures.
- To signal the end of covert signatures phase, owner calls end_signatures, which returns a list of signatures (which will have None at positions of missing signatures).
- To reset the server for a new round, call .reset(); to kill all connections, call .stop().
"""
def __init__(self, bindhost, port=0, upnp = None):
super().__init__(bindhost, port, CovertClientThread, upnp = upnp)
self.round_pubkey = None
def start_components(self, round_pubkey, feerate):
self.components = dict()
self.feerate = feerate
self.round_pubkey = round_pubkey
for c in self.spawned_clients:
c.got_submit = False
def end_components(self):
with self.lock:
ret = self.components
del self.components
return ret
def start_signatures(self, sighashes, pubkeys):
num_inputs = len(sighashes)
assert num_inputs == len(pubkeys)
self.signatures = [None]*num_inputs
self.sighashes = sighashes
self.pubkeys = pubkeys
for c in self.spawned_clients:
c.got_submit = False
def end_signatures(self):
with self.lock:
ret = self.signatures
del self.signatures
return ret
def reset(self):
try:
del self.round_pubkey
del self.components
del self.feerate
except AttributeError:
pass
try:
del self.sighashes
del self.pubkeys
except AttributeError:
pass
def new_client_job(self, client):
client.got_submit = False
while True:
msg, mtype = client.recv('component', 'signature', 'ping', timeout = COVERT_CLIENT_TIMEOUT)
if mtype == 'ping':
continue
if client.got_submit:
# We got a second submission before a new phase started. As
# an anti-spam measure we only allow one submission per connection
# per phase.
client.error('multiple submissions in the same phase')
if mtype == 'component':
try:
round_pubkey = self.round_pubkey
feerate = self.feerate
_ = self.components
except AttributeError:
client.error('component submitted at wrong time')
sort_key, contrib = check_covert_component(msg, round_pubkey, feerate)
with self.lock:
try:
self.components[msg.component] = (sort_key, contrib)
except AttributeError:
client.error('component submitted at wrong time')
else:
assert mtype == 'signature'
try:
sighash = self.sighashes[msg.which_input]
pubkey = self.pubkeys[msg.which_input]
existing_sig = self.signatures[msg.which_input]
except AttributeError:
client.error('signature submitted at wrong time')
except IndexError:
raise ValidationError('which_input too high')
sig = msg.txsignature
if len(sig) != 64:
raise ValidationError('signature length is wrong')
# It might be we already have this signature. This is fine
# since it might be a resubmission after ack failed delivery,
# but we don't allow it to consume our CPU power.
if sig != existing_sig:
if not schnorr.verify(pubkey, sig, sighash):
raise ValidationError('bad transaction signature')
if existing_sig:
# We received a distinct valid signature. This is not
# allowed and we break the connection as a result.
# Note that we could have aborted earlier but this
# way third parties can't abuse us to find out the
# timing of a given input's signature submission.
raise ValidationError('conflicting valid signature')
with self.lock:
try:
self.signatures[msg.which_input] = sig
except AttributeError:
client.error('signature submitted at wrong time')
client.send_ok()
client.got_submit = True
[quality signals for the preceding file omitted: 45,537 bytes, 1,004 lines, avg/max line length 45.31/174]
e014451ff2d26b3e408bb00a4f1a954adc75daa5 | 2,229 | py | Python | Excercici4Package/ex4.py | jtorrenth/CienciaDades @ 81f005ed1ddcc218dcde8c5e2f1a297444389a82 | ["MIT"] | stars/issues/forks: null
import matplotlib.pyplot as plt
def countvalues(dataframe, subject):
# Filter and prepare the dataset
economydf = filtrar(dataframe, "economy")
# Print it
printar(economydf, subject)
# Now filter by subject "infected" and keep it in another dataframe
infectedf = filtrar(dataframe, "infected")
# Compute the percentages
percentvery = (infectedf['ppl_very'].sum() / infectedf['sample_size'].sum()) * 100
percentnotatall = (infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum()) * 100
# Print them
print("percentage very: {}%".format(percentvery))
print("percentage not_at_all: {}%".format(percentnotatall))
grafic4('People_Very', 'People_Not_At_All', percentvery, percentnotatall, "% of people", "Concern level", "% of people worried or not about infection")
def printar(df, subject):
# Print the values to the console
print("Values for subject {}".format(subject))
pplvery = df['ppl_very'].sum()
pplnot = df['ppl_not_at_all'].sum()
print("Very: {}".format(pplvery))
print("Not at All: {}".format(pplnot))
# Finally, plot
# The chart window must be closed for execution to continue
grafic4('People_Very', 'People_Not_At_All', pplvery, pplnot, "People", "Concern level", "Number of people worried or not about the economy")
def filtrar(dataframe, subject1):
df = dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy()
# Add the values scaled by sample_size into two new columns
df['ppl_very'] = df['very'] / 100 * df['sample_size']
df['ppl_not_at_all'] = df['not_at_all'] / 100 * df['sample_size']
return df
def grafic4(label1, label2, valor1, valor2, leyenday, leyendax, titulo):
# Declare the values for the x axis
eje_x = [label1, label2]
# Declare the values for the y axis
eje_y = [valor1, valor2]
# Draw the chart
plt.bar(eje_x, eje_y)
# Label for the y axis
plt.ylabel(leyenday)
# Label for the x axis
plt.xlabel(leyendax)
# Chart title
plt.title(titulo)
# Show the chart
plt.show()
# Function for exercise 4.4
def grades(df):
df['538 Grade'] = df['538 Grade'].str[0]
print(df.groupby('538 Grade').size())
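# Hypothetical usage sketch (file names are placeholders; per the accesses
# above, the polls dataframe needs 'subject', 'very', 'not_at_all' and
# 'sample_size' columns, and grades() expects a '538 Grade' column):
#
#   import pandas as pd
#   polls = pd.read_csv('covid_concern_polls.csv')
#   countvalues(polls, 'economy')   # prints totals/percentages, shows charts
#   pollsters = pd.read_csv('pollster_ratings.csv')
#   grades(pollsters)               # counts pollsters per letter grade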
[quality signals for the preceding file omitted: 2,229 bytes, 69 lines, avg/max line length 31.84/154]
e0161b99cffb06588c8cd2a39e9f07abf59540ea | 18,987 | bzl | Python | build/rules.bzl | filmil/bazel-ebook @ 433f1e157c6c1b7867abf72bc0e882c07477d60d | ["Apache-2.0"] | stars: 9 (2020-05-31..2021-12-21) | issues: 2 (2021-11-09..2021-11-10) | forks: 2 (2020-06-03..2021-12-01)
# Copyright (C) 2020 Google Inc.
#
# This file has been licensed under Apache 2.0 license. Please see the LICENSE
# file at the root of the repository.
# Build rules for building ebooks.
# This is the container
CONTAINER = "filipfilmar/ebook-buildenv:1.1"
# Use this for quick local runs.
#CONTAINER = "ebook-buildenv:local"
EbookInfo = provider(fields=["figures", "markdowns"])
# Returns the docker_run script invocation command based on the
# script path and its reference directory.
#
# Params:
# script_path: (string) The full path to the script to invoke
# dir_reference: (string) The path to a file used for figuring out
# the reference directories (build root and repo root).
def _script_cmd(script_path, dir_reference):
return """\
{script} \
--container={container} \
--dir-reference={dir_reference}""".format(
script=script_path,
container=CONTAINER,
dir_reference=dir_reference,
)
def _drawtiming_png_impl(ctx):
cmd = "drawtiming"
docker_run = ctx.executable._script
figures = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
in_file = src
out_file = ctx.actions.declare_file(in_file.basename + ".png")
figures += [out_file]
script_cmd = _script_cmd(docker_run.path, in_file.path)
ctx.actions.run_shell(
progress_message = "timing diagram to PNG with {1}: {0}".format(in_file.short_path, cmd),
inputs = [in_file],
outputs = [out_file],
tools = [docker_run],
command = """\
{script} \
{cmd} --output "{out_file}" "{in_file}"
""".format(
cmd=cmd,
out_file=out_file.path,
in_file=in_file.path,
script=script_cmd),
)
deps = []
for target in ctx.attr.deps:
ebook_provider = target[EbookInfo]
if not ebook_provider:
continue
deps += ebook_provider.figures
runfiles = ctx.runfiles(files = figures)
return [
EbookInfo(figures=figures+deps, markdowns=[]),
DefaultInfo(files=depset(figures+deps), runfiles=runfiles),
]
drawtiming_png = rule(implementation = _drawtiming_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".t"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a timing diagram file into png using drawtiming",
)
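# Hypothetical BUILD usage of the rule above (target and file names are
# placeholders):
#
#   load("//build:rules.bzl", "drawtiming_png")
#   drawtiming_png(
#       name = "handshake_timing",
#       srcs = ["handshake.t"],
#   )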
def _generalized_graphviz_rule_impl(ctx, cmd):
docker_run = ctx.executable._script
figures = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
in_file = src
out_file = ctx.actions.declare_file(in_file.basename + ".png")
figures += [out_file]
script_cmd = _script_cmd(docker_run.path, in_file.path)
ctx.actions.run_shell(
progress_message = "graphviz to PNG with {1}: {0}".format(in_file.short_path, cmd),
inputs = [in_file],
outputs = [out_file],
tools = [docker_run],
command = """\
{script} \
{cmd} -Tpng -o "{out_file}" "{in_file}"
""".format(
cmd=cmd,
out_file=out_file.path,
in_file=in_file.path,
script=script_cmd),
)
deps = []
for target in ctx.attr.deps:
ebook_provider = target[EbookInfo]
if not ebook_provider:
continue
deps += ebook_provider.figures
runfiles = ctx.runfiles(files = figures)
return [
EbookInfo(figures=figures+deps, markdowns=[]),
DefaultInfo(files=depset(figures+deps), runfiles=runfiles),
]
def _neato_png_impl(ctx):
return _generalized_graphviz_rule_impl(ctx, "neato")
neato_png = rule(implementation = _neato_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".dot"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a graphviz dot file into png using neato",
)
def _dot_png_impl(ctx):
return _generalized_graphviz_rule_impl(ctx, "dot")
dot_png = rule(implementation = _dot_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".dot"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a graphviz dot file into png using dot",
)
def _asymptote_impl(ctx):
asycc = ctx.executable._script
figures = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
in_file = src
out_file = ctx.actions.declare_file(in_file.basename + ".png")
figures += [out_file]
script_cmd = _script_cmd(asycc.path, in_file.path)
ctx.actions.run_shell(
progress_message = "ASY to PNG: {0}".format(in_file.short_path),
inputs = [in_file],
outputs = [out_file],
tools = [asycc],
command = """\
{script} \
asy -render 5 -f png -o "{out_file}" "{in_file}"
""".format(
out_file=out_file.path, in_file=in_file.path, script=script_cmd),
)
deps = []
for target in ctx.attr.deps:
ebook_provider = target[EbookInfo]
if not ebook_provider:
continue
deps += ebook_provider.figures
runfiles = ctx.runfiles(files=figures+deps)
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
EbookInfo(figures=figures+deps, markdowns=[]),
DefaultInfo(files=depset(figures+deps), runfiles=runfiles),
]
asymptote = rule(implementation = _asymptote_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".asy"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform an asymptote file into png",
)
def _copy_file_to_workdir_renamed(ctx, src):
src_copy = ctx.actions.declare_file("{}_{}".format(ctx.label.name, src.short_path))
ctx.actions.run_shell(
progress_message = "Copying {} to {}".format(src.short_path, src_copy.short_path),
outputs = [src_copy],
inputs = [src],
command="cp {} {}".format(src.path, src_copy.path),
)
return src_copy
def _copy_file_to_workdir(ctx, src):
src_copy = ctx.actions.declare_file(src.basename)
ctx.actions.run_shell(
progress_message = "Copying {}".format(src.short_path),
outputs = [src_copy],
inputs = [src],
command="cp {} {}".format(src.path, src_copy.path),
)
return src_copy
def _markdown_lib_impl(ctx):
markdowns = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
markdowns += [_copy_file_to_workdir(ctx, src)]
figures = []
for target in ctx.attr.deps:
provider = target[EbookInfo]
figures += (provider.figures or [])
markdowns += (provider.markdowns or [])
runfiles = ctx.runfiles(files=figures+markdowns)
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
EbookInfo(figures=figures, markdowns=markdowns),
DefaultInfo(
files=depset(figures+markdowns),
runfiles=runfiles,
),
]
markdown_lib = rule(
implementation = _markdown_lib_impl,
doc = "Declares a set of markdown files",
attrs = {
"srcs": attr.label_list(
allow_files = [".md"],
doc = "The markdown source files",
),
"deps": attr.label_list(
doc = "The file to compile",
providers = [EbookInfo],
),
},
)
def _ebook_epub_impl(ctx):
name = ctx.label.name
# This is duplicated in _ebook_pdf_impl.
# steps
# run htex on all *md, gives book.htex
markdowns = []
figures = []
for dep in ctx.attr.deps:
provider = dep[EbookInfo]
markdowns += provider.markdowns
figures += provider.figures
dir_reference = markdowns[0]
htex_file = ctx.actions.declare_file("{}.htex".format(name))
markdowns_paths = [file.path for file in markdowns]
markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference, markdowns)
script = ctx.executable._script
script_cmd = _script_cmd(script.path, markdowns_paths[0])
ctx.actions.run_shell(
progress_message = "Building equation environments for: {}".format(name),
inputs = markdowns,
outputs = [htex_file],
tools = [script],
command = """\
{script} \
pandoc -s --gladtex -o {target} {sources} \
""".format(
script=script_cmd,
target=htex_file.path,
sources=" ".join(markdowns_paths))
)
# run gladtex on the resulting htex to obtain html and output directory with figures.
outdir = ctx.actions.declare_directory("{}.eqn".format(name))
html_file = ctx.actions.declare_file("{}.html".format(name))
ctx.actions.run_shell(
progress_message = "Extracting equations for: {}".format(name),
inputs = [htex_file],
outputs = [outdir, html_file],
tools = [script],
command = """\
{script} --cd-to-dir-reference \
gladtex -r 200 -d {outdir} {htex_file} \
""".format(
script=script_cmd,
outdir=_strip_reference_dir(dir_reference, outdir.path),
htex_file=_strip_reference_dir(dir_reference, htex_file.path),
)
)
outdir_tar = ctx.actions.declare_file("{}.tar".format(outdir.basename))
tar_command = "(cd {base} ; tar cf {archive} {dir})".format(
base=outdir_tar.dirname,
archive=outdir_tar.basename,
dir=outdir.basename)
ctx.actions.run_shell(
progress_message = "Archiving equations: {}".format(outdir_tar.short_path),
inputs = [outdir],
outputs = [outdir_tar],
command = tar_command,
)
# run htexepub to obtain book.epub.
# This is gonna be fun!
epub_metadata = ctx.attr.metadata_xml.files.to_list()[0]
epub_metadata = _copy_file_to_workdir_renamed(ctx, epub_metadata)
title_yaml = ctx.attr.title_yaml.files.to_list()[0]
title_yaml = _copy_file_to_workdir_renamed(ctx, title_yaml)
ebook_epub = ctx.actions.declare_file("{}.epub".format(name))
inputs = [epub_metadata, title_yaml, html_file, outdir, outdir_tar] + markdowns + figures
ctx.actions.run_shell(
progress_message = "Building EPUB for: {}".format(name),
inputs = inputs,
tools = [script],
outputs = [ebook_epub],
command = """\
{script} --cd-to-dir-reference \
pandoc --epub-metadata={epub_metadata} \
-f html -t epub3 -o {ebook_epub} {html_file} \
""".format(
script=script_cmd,
epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path),
ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path),
html_file=_strip_reference_dir(dir_reference, html_file.path),
))
runfiles = ctx.runfiles(files=[ebook_epub])
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
dep[EbookInfo],
DefaultInfo(
files=depset([ebook_epub, outdir, outdir_tar]),
runfiles=runfiles,
)
]
ebook_epub = rule(
implementation = _ebook_epub_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in EPUB format"
)
def _strip_reference_dir(reference_dir, path):
return path.replace(reference_dir.dirname+"/", "")
def _strip_reference_dir_from_files(reference_dir, files):
return [ _strip_reference_dir(reference_dir, file.path) for file in files]
def _ebook_pdf_impl(ctx):
name = ctx.label.name
# steps
# run htex on all *md, gives book.htex
markdowns = []
figures = []
for dep in ctx.attr.deps:
provider = dep[EbookInfo]
markdowns += provider.markdowns
figures += provider.figures
dir_reference = markdowns[0]
# Fixed up paths -- relative to the directory dir_reference, not the
# directory where the build happens! This is needed because we can not control
# figure inclusion.
markdowns_paths = _strip_reference_dir_from_files(dir_reference, markdowns)
script = ctx.executable._script
script_cmd = _script_cmd(script.path, dir_reference.path)
# run htexepub to obtain book.epub.
# This is gonna be fun!
epub_metadata = ctx.attr.metadata_xml.files.to_list()[0]
epub_metadata = _copy_file_to_workdir(ctx, epub_metadata)
title_yaml = ctx.attr.title_yaml.files.to_list()[0]
title_yaml = _copy_file_to_workdir(ctx, title_yaml)
ebook_pdf = ctx.actions.declare_file("{}.pdf".format(name))
inputs = [epub_metadata, title_yaml] + markdowns + figures
ctx.actions.run_shell(
progress_message = "Building PDF for: {}".format(name),
inputs = inputs,
tools = [script],
outputs = [ebook_pdf],
command = """\
{script} --cd-to-dir-reference \
pandoc --epub-metadata={epub_metadata} \
--mathml -o {ebook_pdf} {markdowns} \
""".format(
script=script_cmd,
epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path),
ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path),
markdowns=" ".join(markdowns_paths),
))
runfiles = ctx.runfiles(files=[ebook_pdf])
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
DefaultInfo(
files=depset([ebook_pdf]),
runfiles=runfiles,
)
]
ebook_pdf = rule(
implementation = _ebook_pdf_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in PDF format"
)
def _ebook_kindle_impl(ctx):
mobi_file = ctx.actions.declare_file("{}.mobi".format(ctx.label.name))
# First provider is EbookInfo, second is DefaultInfo.
(ebook_info, default_info) = _ebook_epub_impl(ctx)
# The default outputs are ordered: epub file, equation dir, equation tar.
outputs = default_info.files.to_list()
epub_file = outputs[0]
equation_outdir = outputs[1]
equation_outdir_tar = outputs[2]
captured_output = ctx.actions.declare_file(
"{}.untar-out".format(ctx.label.name))
# untar the equation dir
# Maybe this is not needed.
tar_command = "(cd {base} ; tar xvf {archive}) > {output}".format(
base=equation_outdir_tar.dirname,
archive=equation_outdir_tar.basename,
output=captured_output.path)
ctx.actions.run_shell(
progress_message = "Unarchiving equations: {}".format(equation_outdir_tar.short_path),
inputs = [equation_outdir_tar],
outputs = [captured_output],
command = tar_command,
)
dir_reference = epub_file
script = ctx.executable._script
name = ctx.label.name
script_cmd = _script_cmd(script.path, epub_file.path)
ctx.actions.run_shell(
progress_message = "Building MOBI for: {}".format(name),
inputs = [epub_file, equation_outdir],
tools = [script],
outputs = [mobi_file],
command = """\
{script} --cd-to-dir-reference \
ebook-convert {epub_file} {mobi_file} \
""".format(
script=script_cmd,
epub_file=_strip_reference_dir(dir_reference, epub_file.path),
mobi_file=_strip_reference_dir(dir_reference, mobi_file.path),
))
runfiles = ctx.runfiles(files=[mobi_file])
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
DefaultInfo(
files=depset([mobi_file, captured_output]),
runfiles=runfiles,
)
]
ebook_kindle = rule(
implementation = _ebook_kindle_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in the Kindle's MOBI format"
)
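# A hypothetical end-to-end BUILD sketch combining the rules above (all
# target and file names are placeholders):
#
#   load("//build:rules.bzl", "asymptote", "markdown_lib", "ebook_epub")
#   asymptote(
#       name = "figures",
#       srcs = glob(["*.asy"]),
#   )
#   markdown_lib(
#       name = "chapters",
#       srcs = glob(["*.md"]),
#       deps = [":figures"],
#   )
#   ebook_epub(
#       name = "book_epub",
#       deps = [":chapters"],
#       title_yaml = "title.yaml",
#       metadata_xml = "epub-metadata.xml",
#   )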
[quality signals for the preceding file omitted: 18,987 bytes, 590 lines, avg/max line length 32.13/103]
e0164a1f4fee849a8bca46fb970244ecbfd603fe | 715 | py | Python | 1094 EXPERIENCIAS.py | castrolimoeiro/Uri-exercise @ 7a9227c55a79f14fe8bde4aa0ebb4c268bbda4bb | ["MIT"] | stars/issues/forks: null
n = int(input())
coelho = rato = sapo = contador = 0
for i in range(0, n):
q, t = input().split(' ')
t = t.upper()
q = int(q)
if 1 <= q <= 15:
contador += q
if t == 'C':
coelho += q
elif t == 'R':
rato += q
elif t == 'S':
sapo += q
porccoelho = (coelho * 100) / contador
porcrato = (rato * 100) / contador
porcsapo = (sapo * 100) / contador
print(f'Total: {contador} cobaias')
print(f'Total de coelhos: {coelho}')
print(f'Total de ratos: {rato}')
print(f'Total de sapos: {sapo}')
print(f'Percentual de coelhos: {porccoelho:.2f} %')
print(f'Percentual de ratos: {porcrato:.2f} %')
print(f'Percentual de sapos: {porcsapo:.2f} %')
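# Worked example (hypothetical input): for n = 3 with lines "10 C", "6 R"
# and "15 S", the totals are contador = 31, coelho = 10, rato = 6, sapo = 15,
# so the percentages print as 10*100/31 = 32.26%, 6*100/31 = 19.35% and
# 15*100/31 = 48.39%.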
[quality signals for the preceding file omitted: 715 bytes, 27 lines, avg/max line length 25.54/51]
e018a8edf8d16988caad3f9660a381b73b1f97c4 | 17,156 | py | Python | tibanna/top.py | 4dn-dcic/tibanna @ bb84597c425a481a230be30cb0ed9b99c774e53d | ["MIT"] | stars: 62 (2017-02-16..2022-02-07) | issues: 77 (2017-10-26..2022-03-25) | forks: 19 (2017-01-27..2021-12-12)
import datetime
class Top(object):
"""class TopSeries stores the information of a series of top commands
::
echo -n 'Timestamp: '; date +%F-%H:%M:%S
top -b -n1 [-i] [-c]
over short intervals to monitor the same set of processes over time.
An example input content looks like below, or a series of these.
The initialization works at any time interval and can be used as a generic
class, but it is designed for the output of the regular top commands above,
run at roughly 1-minute intervals, as performed by awsf3 on an AWSEM instance
through cron jobs (some may be skipped, but there should be no more than one per minute).
This top output can be obtained through ``tibanna log -j <job_id> -t`` or through
API ``API().log(job_id=<job_id>, top=True)``.
::
Timestamp: 2020-12-18-18:55:37
top - 18:55:37 up 4 days, 2:37, 0 users, load average: 5.59, 5.28, 5.76
Tasks: 7 total, 1 running, 6 sleeping, 0 stopped, 0 zombie
%Cpu(s): 6.6 us, 0.1 sy, 0.0 ni, 93.2 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 12971188+total, 10379019+free, 20613644 used, 5308056 buff/cache
KiB Swap: 0 total, 0 free, 0 used. 10834606+avail Mem
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
712 root 20 0 36.464g 8.223g 19572 S 100.0 6.6 125:55.12 java -Xmx32g -Xms32g -jar juicer_tools.jar addNorm -w 1000 -d -F out.hic
17919 ubuntu 20 0 40676 3828 3144 R 6.2 0.0 0:00.01 top -b -n1 -c -i -w 10000
The default timestamp from top output does not contain dates, which can screw up multi-day
processes, which are common for bioinformatics pipelines. So, an extra timestamp is added before each top command.
To parse top output content, simply create an object. This will create processes attribute,
which is a raw parsed result organized by time stamps.
::
top = Top(top_output_content)
To reorganize the contents by commands, run digest. By default, the max number of commands is 16,
and if there are more than 16 unique commands, they will be collapsed into prefixes.
::
top.digest()
To write a csv / tsv file organized by both timestamps (rows) and commands (columns),
use :func: write_to_csv.
::
top.write_to_csv(...)
"""
# assume this format for timestamp
timestamp_format = '%Y-%m-%d-%H:%M:%S'
# These commands are excluded when parsing the top output
# Currently only 1-, 2- or 3-word prefixes work.
exclude_list = ['top', 'docker', 'dockerd', '/usr/bin/dockerd', 'cron',
'docker-untar', 'containerd', 'goofys-latest', 'cwltool',
'/usr/bin/containerd-shim-runc-v2', 'goofys', 'nodejs --eval',
'/usr/bin/python3 /usr/local/bin/cwltool', 'containerd-shim',
'/usr/bin/python3 /bin/unattended-upgrade',
'/usr/bin/python3 /usr/local/bin/awsf3',
'/usr/bin/python3 /usr/local/bin/aws s3',
'java -jar /usr/local/bin/cromwell.jar',
'java -jar /usr/local/bin/cromwell-35.jar']
def __init__(self, contents):
"""initialization parsed top output content and
creates processes which is a dictionary with timestamps as keys
and a list of Process class objects as a value.
It also creates empty attributes timestamps, commands, cpus and mems
which can be filled through method :func: digest.
"""
self.processes = dict()
self.timestamps = []
self.commands = []
self.cpus = dict()
self.mems = dict()
self.parse_contents(contents)
def parse_contents(self, contents):
is_in_table = False
for line in contents.splitlines():
if line.startswith('Timestamp:'):
timestamp = line.split()[1]
continue
if line.lstrip().startswith('PID'):
is_in_table = True
continue
if not line or line.isspace():
is_in_table = False
if is_in_table:
if timestamp not in self.processes:
self.processes[timestamp] = []
process = Process(line)
if not self.should_skip_process(process):
self.processes[timestamp].append(Process(line))
def digest(self, max_n_commands=16, sort_by='alphabetical'):
"""Fills in timestamps, commands, cpus and mems attributes
from processes attribute.
:param max_n_commands: When the number of unique commands exceeds
this value, they are collapsed into unique prefixes.
:sort_by: alphabetical|cpu|mem The commands are by default sorted
alphabetically, but optionally can be sorted by total cpus or total
mem (in reverse order) (e.g. the first command consumed the most cpu)
"""
# Reinitialize these so that you get the same results if you run it twice
self.timestamps = []
self.commands = []
self.cpus = dict()
self.mems = dict()
# First fill in commands from commands in processes (and collapse if needed.)
self.commands = self.get_collapsed_commands(max_n_commands)
# Fill in timestamps, cpus and mems from processes, matching collapsed commands.
self.nTimepoints = len(self.processes)
timestamp_ind = 0
for timestamp in sorted(self.processes):
# sorted timestamps (columns)
self.timestamps.append(timestamp)
# commands (rows)
for process in self.processes[timestamp]:
# find a matching collapsed command (i.e. command prefix) and use that as command.
command = Top.convert_command_to_collapsed_command(process.command, self.commands)
if command not in self.cpus:
self.cpus[command] = [0] * self.nTimepoints
self.mems[command] = [0] * self.nTimepoints
self.cpus[command][timestamp_ind] += process.cpu
self.mems[command][timestamp_ind] += process.mem
timestamp_ind += 1
# sort commands according to total cpu
self.sort_commands(by=sort_by)
def get_collapsed_commands(self, max_n_commands):
"""If the number of commands exceeds max_n_commands,
return a collapsed set of commands
that consists of prefixes of commands so that
the total number is within max_n_commands.
First decide the number of words from the beginning of the commands
to collapse commands that start with the same words, i.e.
find the maximum number of words that makes the number of unique commands to be
bounded by max_n_commands.
If using only the first word is not sufficient, go down to the characters of
the first word. If that's still not sufficient, collapse all of them into a single
command ('all_commands')
After the collapse, commands that are unique to a collapsed prefix are
extended back to the original command.
"""
all_commands = set()
for timestamp in self.processes:
all_commands.update(set([pr.command for pr in self.processes[timestamp]]))
if len(all_commands) <= max_n_commands:
# no need to collapse
return list(all_commands)
# decide the number of words from the beginning of the commands
# to collapse commands starting with the same words
all_cmd_lengths = [len(cmd.split()) for cmd in all_commands] # number of words per command
max_cmd_length = max(all_cmd_lengths)
min_cmd_length = min(all_cmd_lengths)
collapsed_len = max_cmd_length - 1
n_commands = len(all_commands)
while n_commands > max_n_commands and collapsed_len > 1:
reduced_commands = set()
for cmd in all_commands:
reduced_commands.add(Top.first_words(cmd, collapsed_len))
n_commands = len(reduced_commands)
collapsed_len -= 1
# went down to the first word only but still too many commands - start splitting characters then
if n_commands > max_n_commands:
all_cmd_lengths = [len(cmd.split()[0]) for cmd in all_commands] # number of characters of the first word
max_cmd_length = max(all_cmd_lengths)
min_cmd_length = min(all_cmd_lengths)
collapsed_len = max_cmd_length - 1
while n_commands > max_n_commands and collapsed_len > 1:
reduced_commands = set()
for cmd in all_commands:
reduced_commands.add(Top.first_characters(cmd.split()[0], collapsed_len))
n_commands = len(reduced_commands)
collapsed_len -= 1
if n_commands > max_n_commands:
return ['all_commands']
else:
# extend reduced commands that don't need to be reduced
for r_cmd in list(reduced_commands): # wrap in list so that we can remove elements in the loop
uniq_cmds = [cmd for cmd in all_commands if cmd.startswith(r_cmd)]
if len(uniq_cmds) == 1:
reduced_commands.remove(r_cmd)
reduced_commands.add(uniq_cmds[0])
return reduced_commands
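# Worked example (hypothetical commands): with max_n_commands=2 and commands
# {'aws s3 cp a', 'aws s3 cp b', 'java -jar x.jar'}, collapsing to three-word
# prefixes gives {'aws s3 cp', 'java -jar x.jar'}; the second prefix matches
# only one original command, so it is extended back to the full command.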
def write_to_csv(self, csv_file, metric='cpu', delimiter=',', colname_for_timestamps='timepoints',
timestamp_start=None, timestamp_end=None, base=0):
"""write metrics as csv file with commands as columns
:param metric: 'cpu' or 'mem'
:param delimiter: default ','
:param colname_for_timestamps: colunm name for the timepoint column (1st column). default 'timepoints'
:param timestamp_start: start time in the same timestamp format (e.g. 01:23:45),
time stamps will be converted to minutes since start time.
The report starts with minute 0.
Time points with no top records will be filled with 0.
If not specified, the first timestamp in the top commands will be used.
:param timestamp_end: end time in the same timestamp format (e.g. 01:23:45),
The reports will be generated only up to the end time.
Time points with no top records will be filled with 0.
If not specified, the last timestamp in the top commands will be used.
:param base: default 0. If 0, minutes start with 0, if 1, minutes are 1-based (shifted by 1).
"""
metric_array = getattr(self, metric + 's')
if self.timestamps:
if not timestamp_start:
timestamp_start = self.timestamps[0]
if not timestamp_end:
timestamp_end = self.timestamps[-1]
timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start)
last_minute = self.as_minutes(timestamp_end, timestamp_start)
else: # default when timestamps is not available (empty object)
timestamps_as_minutes = range(0, 5)
last_minute = 5
with open(csv_file, 'w') as fo:
# header
# we have to escape any double quotes that are present in the cmd, before wrapping it in double quotes. Otherwise we
# will get incorrect column counts when creating the metrics report.
fo.write(delimiter.join([colname_for_timestamps] + [Top.wrap_in_double_quotes(cmd.replace('"', '""')) for cmd in self.commands]))
fo.write('\n')
# contents
# skip timepoints earlier than timestamp_start
for i in range(0, len(timestamps_as_minutes)):
if timestamps_as_minutes[i] >= 0:
break
for clock in range(0, last_minute + 1):
clock_shifted = clock + base
if i < len(timestamps_as_minutes) and timestamps_as_minutes[i] == clock:
fo.write(delimiter.join([str(clock_shifted)] + [str(metric_array[cmd][i]) for cmd in self.commands]))
i += 1
else:
fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd in self.commands])) # add 0 for timepoints not reported
fo.write('\n')
def should_skip_process(self, process):
"""A predicate function to check if the process should be skipped (excluded).
It returns True if the input process should be skipped.
e.g. the top command itself is excluded, as well as docker, awsf3, cwltool, etc.
the list to be excluded is in self.exclude_list.
It compares either first word or first two or three words only.
Kernel threads (single-word commands wrapped in bracket (e.g. [perl]) are also excluded.
"""
first_word = Top.first_words(process.command, 1)
first_two_words = Top.first_words(process.command, 2)
first_three_words = Top.first_words(process.command, 3)
if first_word in self.exclude_list:
return True
elif first_two_words in self.exclude_list:
return True
elif first_three_words in self.exclude_list:
return True
if first_word.startswith('[') and first_word.endswith(']'):
return True
return False
@staticmethod
def convert_command_to_collapsed_command(cmd, collapsed_commands):
if collapsed_commands == 'all_commands': # collapsed to one command
return 'all_commands'
elif cmd in collapsed_commands: # not collapsed
return cmd
else: # collapsed to prefix
all_prefixes = [_ for _ in collapsed_commands if cmd.startswith(_)]
longest_prefix = sorted(all_prefixes, key=lambda x: len(x), reverse=True)[0]
return longest_prefix
def total_cpu_per_command(self, command):
return sum([v for v in self.cpus[command]])
def total_mem_per_command(self, command):
return sum([v for v in self.mems[command]])
def sort_commands(self, by='cpu'):
"""sort self.commands by total cpu (default) or mem in reverse order,
or alphabetically (by='alphabetical')"""
if by == 'cpu':
self.commands = sorted(self.commands, key=lambda x: self.total_cpu_per_command(x), reverse=True)
elif by == 'mem':
self.commands = sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True)
elif by == 'alphabetical':
self.commands = sorted(self.commands)
@classmethod
def as_minutes(cls, timestamp, timestamp_start):
"""timestamp as minutes since timestamp_start.
:param timestamp: given timestamp in the same format (e.g. 01:23:45)
:param timestamp_start: start timestamp in the same format (e.g. 01:20:45)
In the above example, 3 will be the return value.
"""
dt = cls.as_datetime(timestamp)
dt_start = cls.as_datetime(timestamp_start)
# negative numbers are not supported by timedelta, so do each case separately
if dt > dt_start:
return round((dt - dt_start).seconds / 60)
else:
return -round((dt_start - dt).seconds / 60)
def timestamps_as_minutes(self, timestamp_start):
"""convert self.timestamps to a list of minutes since timestamp_start
:param timestamp_start: timestamp in the same format (e.g. 01:23:45)
"""
return [self.as_minutes(t, timestamp_start) for t in self.timestamps]
@classmethod
def as_datetime(cls, timestamp):
return datetime.datetime.strptime(timestamp, cls.timestamp_format)
@staticmethod
def wrap_in_double_quotes(string):
"""wrap a given string with double quotes (e.g. haha -> "haha")
"""
return '\"' + string + '\"'
@staticmethod
def first_words(string, n_words):
"""returns first n words of a string
e.g. first_words('abc def ghi', 2) ==> 'abc def'
"""
words = string.split()
return ' '.join(words[0:min(n_words, len(words))])
@staticmethod
def first_characters(string, n_letters):
"""returns first n letters of a string
e.g. first_characters('abc def ghi', 2) ==> 'ab'
"""
letters = list(string)
return ''.join(letters[0:min(n_letters, len(letters))])
def as_dict(self):
return self.__dict__
class Process(object):
def __init__(self, top_line):
prinfo_as_list = top_line.lstrip().split()
self.pid = prinfo_as_list[0]
self.user = prinfo_as_list[1]
self.cpu = float(prinfo_as_list[8])
self.mem = float(prinfo_as_list[9])
self.command = ' '.join(prinfo_as_list[11:])
def as_dict(self):
return self.__dict__
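# Minimal usage sketch (illustrative; `content` is top output in the format
# documented in the Top docstring, and the csv file name is a placeholder):
#
#   top = Top(content)
#   top.digest(max_n_commands=16, sort_by='cpu')
#   top.write_to_csv('cpu_metrics.csv', metric='cpu',
#                    timestamp_start='2020-12-18-18:55:00')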
[quality signals for the preceding file omitted: 17,156 bytes, 361 lines, avg/max line length 47.39/148]
e01a18c1d0d2ecbc1fcb6159c9f9c87becb0c6cc | 1,458 | py | Python | venv/Lib/site-packages/zmq/tests/test_draft.py | ajayiagbebaku/NFL-Model @ afcc67a85ca7138c58c3334d45988ada2da158ed | ["MIT"] | stars: 603 (2020-12-23..2022-03-31) | issues: 387 (2020-12-15..2022-03-31) | forks: 35 (2021-03-26..2022-03-23)
# -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.

import os
import platform
import time

import pytest

import zmq
from zmq.tests import BaseZMQTestCase, skip_pypy


class TestDraftSockets(BaseZMQTestCase):
    def setUp(self):
        if not zmq.DRAFT_API:
            pytest.skip("draft api unavailable")
        super(TestDraftSockets, self).setUp()

    def test_client_server(self):
        client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER)
        client.send(b'request')
        msg = self.recv(server, copy=False)
        assert msg.routing_id is not None
        server.send(b'reply', routing_id=msg.routing_id)
        reply = self.recv(client)
        assert reply == b'reply'

    def test_radio_dish(self):
        dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO)
        dish.rcvtimeo = 250
        group = 'mygroup'
        dish.join(group)
        received_count = 0
        received = set()
        sent = set()
        for i in range(10):
            msg = str(i).encode('ascii')
            sent.add(msg)
            radio.send(msg, group=group)
            try:
                recvd = dish.recv()
            except zmq.Again:
                time.sleep(0.1)
            else:
                received.add(recvd)
                received_count += 1
        # assert that we got *something*
        assert len(received.intersection(sent)) >= 5
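For context, a minimal standalone sketch of the same CLIENT/SERVER round-trip outside the test harness; it assumes a pyzmq build compiled with draft support (otherwise the socket types raise), and uses an arbitrary local endpoint:

# Sketch, assuming zmq.DRAFT_API is available in this build.
import zmq

ctx = zmq.Context()
server = ctx.socket(zmq.SERVER)
server.bind("tcp://127.0.0.1:5555")
client = ctx.socket(zmq.CLIENT)
client.connect("tcp://127.0.0.1:5555")

client.send(b'request')
frame = server.recv(copy=False)   # need the Frame to read routing_id
server.send(b'reply', routing_id=frame.routing_id)
print(client.recv())              # b'reply'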
| 29.16
| 71
| 0.593964
| 179
| 1,458
| 4.75419
| 0.50838
| 0.031727
| 0.037603
| 0.044653
| 0.051704
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01088
| 0.306584
| 1,458
| 49
| 72
| 29.755102
| 0.830861
| 0.095336
| 0
| 0
| 0
| 0
| 0.038052
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.25641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e01cd6185b052b2c9153c8eec135e9e3a2cf7572
| 667
|
py
|
Python
|
base/site-packages/django_qbe/urls.py
|
edisonlz/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
[
"Apache-2.0"
] | 285
|
2019-12-23T09:50:21.000Z
|
2021-12-08T09:08:49.000Z
|
base/site-packages/django_qbe/urls.py
|
jeckun/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
[
"Apache-2.0"
] | null | null | null |
base/site-packages/django_qbe/urls.py
|
jeckun/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
[
"Apache-2.0"
] | 9
|
2019-12-23T12:59:25.000Z
|
2022-03-15T05:12:11.000Z
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url

from django_qbe.exports import formats

urlpatterns = patterns('django_qbe.views',
    url(r'^$', 'qbe_form', name="qbe_form"),
    url(r'^js/$', 'qbe_js', name="qbe_js"),
    url(r'^results/bookmark/$',
        'qbe_bookmark', name="qbe_bookmark"),
    url(r'^results/export/(?P<format>(%s))/$' % "|".join(formats.keys()),
        'qbe_export', name="qbe_export"),
    url(r'^results/proxy/$',
        'qbe_proxy', name="qbe_proxy"),
    url(r'^results/(?P<query_hash>(.*))/$',
        'qbe_results', name="qbe_results"),
    url(r'^auto/$', 'qbe_autocomplete', name="qbe_autocomplete"),
)
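Note that `django.conf.urls.defaults`, `patterns()`, and string view references were all removed in later Django releases; a rough modern equivalent of the same routing table might look like the sketch below (the `views` import is an assumption about how django_qbe exposes its callables):

# Sketch only: assumes django_qbe.views exposes these views as callables.
from django.urls import path, re_path
from django_qbe import views
from django_qbe.exports import formats

urlpatterns = [
    path('', views.qbe_form, name='qbe_form'),
    path('js/', views.qbe_js, name='qbe_js'),
    path('results/bookmark/', views.qbe_bookmark, name='qbe_bookmark'),
    re_path(r'^results/export/(?P<format>(%s))/$' % '|'.join(formats.keys()),
            views.qbe_export, name='qbe_export'),
    path('results/proxy/', views.qbe_proxy, name='qbe_proxy'),
    re_path(r'^results/(?P<query_hash>.*)/$', views.qbe_results, name='qbe_results'),
    path('auto/', views.qbe_autocomplete, name='qbe_autocomplete'),
]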
| 37.055556
| 73
| 0.611694
| 89
| 667
| 4.393258
| 0.370787
| 0.071611
| 0.112532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00177
| 0.152924
| 667
| 17
| 74
| 39.235294
| 0.690265
| 0.031484
| 0
| 0
| 0
| 0
| 0.427019
| 0.100932
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e01de102906f7a6f8c39855d08b6adaa53f5663c
| 1,347
|
py
|
Python
|
Graph/print all paths from two vertices in a directed graph.py
|
ikaushikpal/DS-450-python
|
9466f77fb9db9e6a5bb3f20aa89ba6332f49e848
|
[
"MIT"
] | 3
|
2021-06-28T12:04:19.000Z
|
2021-09-07T07:23:41.000Z
|
Graph/print all paths from two vertices in a directed graph.py
|
SupriyoDam/DS-450-python
|
5dc21ce61b3279e9bd9d6ef3ad236667227ca283
|
[
"MIT"
] | null | null | null |
Graph/print all paths from two vertices in a directed graph.py
|
SupriyoDam/DS-450-python
|
5dc21ce61b3279e9bd9d6ef3ad236667227ca283
|
[
"MIT"
] | 1
|
2021-06-28T15:42:55.000Z
|
2021-06-28T15:42:55.000Z
|
from collections import defaultdict


class Graph:
    def __init__(self):
        self.graph = defaultdict(list)

    def addEdge(self, starting_vertex, end_vertex):
        self.graph[starting_vertex].append(end_vertex)

    def printAllPaths(self, starting_vertex, target_vertex):
        visitedVertices = defaultdict(bool)
        self.resultPaths = []
        self.dfsUtil(starting_vertex, visitedVertices, target_vertex, "")
        return self.resultPaths

    def dfsUtil(self, current_vertex, visitedVertices, target_vertex, output_string):
        visitedVertices[current_vertex] = True
        if output_string == "":
            output_string = current_vertex
        else:
            output_string = output_string + "->" + current_vertex
        if current_vertex == target_vertex:
            self.resultPaths.append(output_string)
            return
        for vertex in self.graph[current_vertex]:
            if not visitedVertices[vertex]:
                self.dfsUtil(vertex, visitedVertices, target_vertex, output_string)
                # unmark after backtracking so the vertex can appear on other paths
                visitedVertices[vertex] = False


if __name__ == "__main__":
    g = Graph()
    g.addEdge("A", "B")
    g.addEdge("B", "D")
    g.addEdge("A", "D")
    g.addEdge("C", "A")
    g.addEdge("C", "B")
    g.addEdge("A", "C")

    paths = g.printAllPaths("A", "B")
    print(paths)
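For the edge set above, the driver should print every simple path from A to B; tracing the DFS (neighbors are visited in insertion order B, D, C, and the A->D branch dead-ends since D has no outgoing edges) gives:

# Expected output of the driver above:
# ['A->B', 'A->C->B']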
| 28.659574
| 85
| 0.628062
| 146
| 1,347
| 5.547945
| 0.273973
| 0.103704
| 0.1
| 0.122222
| 0.239506
| 0.239506
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0.256125
| 1,347
| 46
| 86
| 29.282609
| 0.808383
| 0
| 0
| 0
| 0
| 0
| 0.017817
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.029412
| 0
| 0.235294
| 0.088235
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e01f044aab30cbd5165bae297a319d57b579704e
| 912
|
py
|
Python
|
tierpsy/debugging/catch_infinite_loop.py
|
mgh17/tierpsy-tracker
|
a18c06aa80a5fb22fd51563d82c639b520742777
|
[
"MIT"
] | 9
|
2021-01-11T10:49:21.000Z
|
2022-02-28T15:48:00.000Z
|
tierpsy/debugging/catch_infinite_loop.py
|
mgh17/tierpsy-tracker
|
a18c06aa80a5fb22fd51563d82c639b520742777
|
[
"MIT"
] | 18
|
2020-05-08T15:43:08.000Z
|
2022-03-23T10:19:24.000Z
|
tierpsy/debugging/catch_infinite_loop.py
|
mgh17/tierpsy-tracker
|
a18c06aa80a5fb22fd51563d82c639b520742777
|
[
"MIT"
] | 10
|
2019-12-18T12:10:12.000Z
|
2022-01-05T09:12:47.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 8 16:19:07 2017

@author: ajaver
"""
import os
import cv2
import sys
import glob
import threading
from functools import partial

main_dir = '/Volumes/behavgenom_archive$/Celine/raw/'
# '**' only recurses with recursive=True; without it the original call
# silently matched a single directory level
fnames = glob.glob(os.path.join(main_dir, '**', '*.avi'), recursive=True)
fnames = [x for x in fnames if not x.endswith('_seg.avi')]
fnames = sorted(fnames)


def get_and_release(video_file):
    # silence OpenCV's stderr chatter while opening the file
    original = sys.stderr
    f = open(os.devnull, 'w')
    sys.stderr = f
    print('here')
    vid = cv2.VideoCapture(video_file)
    vid.release()
    sys.stderr = original
    return vid


all_threads = []
for ii, video_file in enumerate(fnames):
    print(ii, video_file)
    vid = cv2.VideoCapture(video_file)
    vid.release()
    t = threading.Thread(target=partial(get_and_release, video_file))
    t.start()
    all_threads.append((video_file, t))
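A follow-up one could add to actually flag the hanging files: join each thread with a timeout and report the stragglers. This is a sketch, not part of the original script, and the 30-second budget is an assumption:

# Hypothetical follow-up: report files whose capture thread never finished.
for video_file, t in all_threads:
    t.join(timeout=30)  # assumed per-file budget
    if t.is_alive():
        print('possible infinite loop in:', video_file)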
| 21.714286
| 71
| 0.663377
| 132
| 912
| 4.454545
| 0.537879
| 0.107143
| 0.061224
| 0.061224
| 0.20068
| 0.12585
| 0.12585
| 0
| 0
| 0
| 0
| 0.021948
| 0.200658
| 912
| 42
| 72
| 21.714286
| 0.784636
| 0.105263
| 0
| 0.148148
| 0
| 0
| 0.074257
| 0.049505
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.222222
| 0
| 0.296296
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e02439282d17416800f4bfd8e050f404bc4d7706
| 5,991
|
py
|
Python
|
donkeycar/parts/pytorch/torch_data.py
|
adricl/donkeycar
|
8eb2705ed4161c0d6a9cfd9c7b0a1c0ca5abaeef
|
[
"MIT"
] | 1,100
|
2017-01-18T16:08:33.000Z
|
2018-11-04T00:42:54.000Z
|
donkeycar/parts/pytorch/torch_data.py
|
adricl/donkeycar
|
8eb2705ed4161c0d6a9cfd9c7b0a1c0ca5abaeef
|
[
"MIT"
] | 199
|
2016-12-20T07:45:16.000Z
|
2018-11-01T02:30:12.000Z
|
donkeycar/parts/pytorch/torch_data.py
|
adricl/donkeycar
|
8eb2705ed4161c0d6a9cfd9c7b0a1c0ca5abaeef
|
[
"MIT"
] | 521
|
2017-01-10T21:53:24.000Z
|
2018-11-01T18:17:52.000Z
|
# PyTorch
import torch
from torch.utils.data import IterableDataset, DataLoader
from donkeycar.utils import train_test_split
from donkeycar.parts.tub_v2 import Tub
from torchvision import transforms
from typing import List, Any
from donkeycar.pipeline.types import TubRecord, TubDataset
from donkeycar.pipeline.sequence import TubSequence
import pytorch_lightning as pl


def get_default_transform(for_video=False, for_inference=False, resize=True):
    """
    Creates a default transform to work with torchvision models

    Video transform:
    All pre-trained models expect input images normalized in the same way,
    i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W),
    where H and W are expected to be 112, and T is a number of video frames
    in a clip. The images have to be loaded in to a range of [0, 1] and
    then normalized using mean = [0.43216, 0.394666, 0.37645] and
    std = [0.22803, 0.22145, 0.216989].
    """
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    input_size = (224, 224)

    if for_video:
        mean = [0.43216, 0.394666, 0.37645]
        std = [0.22803, 0.22145, 0.216989]
        input_size = (112, 112)

    transform_items = [
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ]
    if resize:
        transform_items.insert(0, transforms.Resize(input_size))

    return transforms.Compose(transform_items)


class TorchTubDataset(IterableDataset):
    '''
    An IterableDataset over a list of TubRecords; the train/val split
    itself is done by TorchTubDataModule below.
    '''

    def __init__(self, config, records: List[TubRecord], transform=None):
        """Create a PyTorch Tub Dataset

        Args:
            config (object): the configuration information
            records (List[TubRecord]): a list of tub records
            transform (function, optional): a transform to apply to the data
        """
        self.config = config

        # Handle the transforms
        if transform:
            self.transform = transform
        else:
            self.transform = get_default_transform()

        self.sequence = TubSequence(records)
        self.pipeline = self._create_pipeline()
        self.len = len(records)

    def _create_pipeline(self):
        """This can be overridden if more complicated pipelines are
        required."""

        def y_transform(record: TubRecord):
            angle: float = record.underlying['user/angle']
            throttle: float = record.underlying['user/throttle']
            predictions = torch.tensor([angle, throttle], dtype=torch.float)
            # Normalize to be between [0, 1];
            # angle and throttle are originally between [-1, 1]
            predictions = (predictions + 1) / 2
            return predictions

        def x_transform(record: TubRecord):
            # Loads the result of Image.open()
            img_arr = record.image(cached=True, as_nparray=False)
            return self.transform(img_arr)

        # Build pipeline using the transformations
        pipeline = self.sequence.build_pipeline(x_transform=x_transform,
                                                y_transform=y_transform)
        return pipeline

    def __len__(self):
        return len(self.sequence)

    def __iter__(self):
        return iter(self.pipeline)


class TorchTubDataModule(pl.LightningDataModule):

    def __init__(self, config: Any, tub_paths: List[str], transform=None):
        """Create a PyTorch Lightning Data Module to contain all data loading logic

        Args:
            config (object): the configuration information
            tub_paths (List[str]): a list of paths to the tubs to use (minimum size of 1).
                Each tub path corresponds to another training run.
            transform (function, optional): a transform to apply to the data
        """
        super().__init__()
        self.config = config
        self.tub_paths = tub_paths

        # Handle the transforms
        if transform:
            self.transform = transform
        else:
            self.transform = get_default_transform()

        self.tubs: List[Tub] = [Tub(tub_path, read_only=True)
                                for tub_path in self.tub_paths]
        self.records: List[TubRecord] = []

    def setup(self, stage=None):
        """Load all the tub data and set up the datasets.

        Args:
            stage ([string], optional): setup expects a string arg stage.
                It is used to separate setup logic for trainer.fit
                and trainer.test. Defaults to None.
        """
        # Loop through all the different tubs and load all the records for each of them
        for tub in self.tubs:
            for underlying in tub:
                record = TubRecord(self.config, tub.base_path,
                                   underlying=underlying)
                self.records.append(record)

        train_records, val_records = train_test_split(
            self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT))
        assert len(val_records) > 0, "Not enough validation data. Add more data"

        self.train_dataset = TorchTubDataset(
            self.config, train_records, transform=self.transform)
        self.val_dataset = TorchTubDataset(
            self.config, val_records, transform=self.transform)

    def train_dataloader(self):
        # num_workers is set to 0 to avoid errors on Macs and Windows
        # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
        return DataLoader(self.train_dataset,
                          batch_size=self.config.BATCH_SIZE, num_workers=0)

    def val_dataloader(self):
        # num_workers is set to 0 to avoid errors on Macs and Windows
        # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
        return DataLoader(self.val_dataset,
                          batch_size=self.config.BATCH_SIZE, num_workers=0)
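A minimal sketch of wiring this data module into a Lightning training loop; `cfg` stands in for a donkeycar config object with the attributes used above (`TRAIN_TEST_SPLIT`, `BATCH_SIZE`), and both the tub path and `MyDrivingModule` are hypothetical placeholders:

# Sketch only, not donkeycar's actual training entry point.
import pytorch_lightning as pl

cfg = load_config()                              # assumed donkeycar config
data = TorchTubDataModule(cfg, ['data/tub_1'])   # hypothetical tub path
model = MyDrivingModule()                        # any pl.LightningModule

trainer = pl.Trainer(max_epochs=10)
trainer.fit(model, datamodule=data)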
| 37.21118
| 95
| 0.633951
| 748
| 5,991
| 4.962567
| 0.299465
| 0.02694
| 0.015086
| 0.005927
| 0.25
| 0.235453
| 0.212284
| 0.188039
| 0.188039
| 0.188039
| 0
| 0.037366
| 0.285261
| 5,991
| 160
| 96
| 37.44375
| 0.829519
| 0.336171
| 0
| 0.125
| 0
| 0
| 0.017108
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 1
| 0.1375
| false
| 0
| 0.1125
| 0.05
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e025cd2fbcd0226b08e7474394109f24f199f13c
| 3,857
|
py
|
Python
|
homeassistant/components/sensor/hddtemp.py
|
mdonoughe/home-assistant
|
d9805160bc787146bff0c434fdcab995716f0f8c
|
[
"Apache-2.0"
] | 2
|
2020-02-20T18:47:55.000Z
|
2021-11-09T11:33:28.000Z
|
homeassistant/components/sensor/hddtemp.py
|
mdonoughe/home-assistant
|
d9805160bc787146bff0c434fdcab995716f0f8c
|
[
"Apache-2.0"
] | 1
|
2021-02-08T20:56:06.000Z
|
2021-02-08T20:56:06.000Z
|
homeassistant/components/sensor/hddtemp.py
|
diophung/home-assistant
|
a5aa1118937702ca8bec050614ee52dc14f8466b
|
[
"Apache-2.0"
] | 1
|
2020-11-21T09:37:47.000Z
|
2020-11-21T09:37:47.000Z
|
"""
Support for getting the disk temperature of a host.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.hddtemp/
"""
import logging
from datetime import timedelta
from telnetlib import Telnet
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_DISKS)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_DEVICE = 'device'
ATTR_MODEL = 'model'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 7634
DEFAULT_NAME = 'HD Temperature'
DEFAULT_TIMEOUT = 5
SCAN_INTERVAL = timedelta(minutes=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DISKS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the HDDTemp sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
disks = config.get(CONF_DISKS)
hddtemp = HddTempData(host, port)
hddtemp.update()
if hddtemp.data is None:
return False
if not disks:
disks = [next(iter(hddtemp.data)).split('|')[0]]
dev = []
for disk in disks:
if disk in hddtemp.data:
dev.append(HddTempSensor(name, disk, hddtemp))
add_devices(dev, True)
class HddTempSensor(Entity):
"""Representation of a HDDTemp sensor."""
def __init__(self, name, disk, hddtemp):
"""Initialize a HDDTemp sensor."""
self.hddtemp = hddtemp
self.disk = disk
self._name = '{} {}'.format(name, disk)
self._state = None
self._details = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self._details[3] == 'C':
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_DEVICE: self._details[0],
ATTR_MODEL: self._details[1],
}
def update(self):
"""Get the latest data from HDDTemp daemon and updates the state."""
self.hddtemp.update()
if self.hddtemp.data and self.disk in self.hddtemp.data:
self._details = self.hddtemp.data[self.disk].split('|')
self._state = self._details[2]
else:
self._state = None
class HddTempData(object):
"""Get the latest data from HDDTemp and update the states."""
def __init__(self, host, port):
"""Initialize the data object."""
self.host = host
self.port = port
self.data = None
def update(self):
"""Get the latest data from HDDTemp running as daemon."""
try:
connection = Telnet(
host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)
data = connection.read_all().decode(
'ascii').lstrip('|').rstrip('|').split('||')
self.data = {data[i].split('|')[0]: data[i]
for i in range(0, len(data), 1)}
except ConnectionRefusedError:
_LOGGER.error(
"HDDTemp is not available at %s:%s", self.host, self.port)
self.data = None
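For reference, the telnet payload that the `update` parser above expects is a `|device|model|temperature|unit|` record per disk, with records separated by `||`. A quick standalone check of the same parsing logic (the device names and readings are illustrative):

# Illustrative hddtemp daemon payload and the resulting dict:
raw = '|/dev/sda|ST2000DM001|34|C||/dev/sdb|WDC WD20EARX|36|C|'
data = raw.lstrip('|').rstrip('|').split('||')
parsed = {d.split('|')[0]: d for d in data}
# parsed == {'/dev/sda': '/dev/sda|ST2000DM001|34|C',
#            '/dev/sdb': '/dev/sdb|WDC WD20EARX|36|C'}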
| 29.219697
| 79
| 0.637283
| 481
| 3,857
| 4.966736
| 0.288981
| 0.027627
| 0.025115
| 0.020092
| 0.064044
| 0.044789
| 0.033487
| 0.033487
| 0.033487
| 0
| 0
| 0.004836
| 0.249417
| 3,857
| 131
| 80
| 29.442748
| 0.82038
| 0.160747
| 0
| 0.114943
| 0
| 0
| 0.02678
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.091954
| 0
| 0.287356
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e026ba13c5f7c12090e3dee6c5f9a4f65eca3bb7
| 1,402
|
py
|
Python
|
boomer.py
|
JohnnySn0w/BabbleBot
|
03a383b063e4f28049f27f8ec669f22767ed8a87
|
[
"MIT"
] | 1
|
2019-07-07T01:46:55.000Z
|
2019-07-07T01:46:55.000Z
|
boomer.py
|
JohnnySn0w/BabbleBot
|
03a383b063e4f28049f27f8ec669f22767ed8a87
|
[
"MIT"
] | 1
|
2019-07-26T18:34:02.000Z
|
2019-07-26T18:34:02.000Z
|
boomer.py
|
JohnnySn0w/BabbleBot
|
03a383b063e4f28049f27f8ec669f22767ed8a87
|
[
"MIT"
] | 1
|
2020-05-10T01:27:48.000Z
|
2020-05-10T01:27:48.000Z
|
import random

prefix = [
    'Look at you! ',
    'Bless ',
    'Bless! ',
    'I heard about that! ',
    'Amen!',
    'You and the kids doing alright?',
    'Miss ya\'ll!'
]

suffix = [
    '. Amen!',
    '. God bless america',
    '. God bless!',
    ' haha',
    '. love ya!',
    '. love ya\'ll!',
]


def add_pre_suf(sentence):
    if random.randint(1, 10) <= 6:
        if random.randint(1, 10) <= 5:
            sentence = prefix[random.randint(0, len(prefix) - 1)] + sentence
        else:
            sentence += suffix[random.randint(0, len(suffix) - 1)]
    return sentence


def add_elipses(sentence):
    words = sentence.split()
    for i in range(4, len(words), 5):
        if random.randint(1, 10) <= 7:
            words[i] += "..."
    return " ".join(words)


def boomer_caps(sentence):
    seed = random.randint(1, 10)
    sent_array = sentence.split()
    if seed in (1, 2, 3):
        return sentence
    elif seed in (4, 5):
        temp_sent = []
        for x in sent_array:
            if random.random() < 0.25:
                x = x.upper()
            temp_sent.append(x)
        return " ".join(temp_sent)
    elif seed in (6, 7):
        temp_sent = []
        for x in sent_array:
            if random.random() < 0.5:
                x = x.upper()
            temp_sent.append(x)
        return " ".join(temp_sent)
    elif seed in (8, 9):
        return sentence.title()
    elif seed == 10:
        return sentence.upper()
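The three helpers compose into a single transformation; a sketch of the intended pipeline (the wrapper name `boomerize` is mine, and the printed output is one possible random result):

# Sketch: chain the randomized transformations above.
def boomerize(sentence):
    return add_pre_suf(add_elipses(boomer_caps(sentence)))

print(boomerize("see you at the potluck next sunday"))
# e.g. "Bless SEE you at the potluck... NEXT sunday. love ya!"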
| 23.366667
| 76
| 0.53067
| 189
| 1,402
| 3.867725
| 0.343915
| 0.106703
| 0.076607
| 0.087551
| 0.314637
| 0.240766
| 0.240766
| 0.240766
| 0.240766
| 0.240766
| 0
| 0.038382
| 0.312411
| 1,402
| 59
| 77
| 23.762712
| 0.719917
| 0
| 0
| 0.222222
| 0
| 0
| 0.113409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.018519
| 0
| 0.203704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|