**Schema** (113 columns):

| # | column | dtype |
| --- | --- | --- |
| 1 | hexsha | string |
| 2 | size | int64 |
| 3 | ext | string |
| 4 | lang | string |
| 5 | max_stars_repo_path | string |
| 6 | max_stars_repo_name | string |
| 7 | max_stars_repo_head_hexsha | string |
| 8 | max_stars_repo_licenses | list |
| 9 | max_stars_count | int64 |
| 10 | max_stars_repo_stars_event_min_datetime | string |
| 11 | max_stars_repo_stars_event_max_datetime | string |
| 12 | max_issues_repo_path | string |
| 13 | max_issues_repo_name | string |
| 14 | max_issues_repo_head_hexsha | string |
| 15 | max_issues_repo_licenses | list |
| 16 | max_issues_count | int64 |
| 17 | max_issues_repo_issues_event_min_datetime | string |
| 18 | max_issues_repo_issues_event_max_datetime | string |
| 19 | max_forks_repo_path | string |
| 20 | max_forks_repo_name | string |
| 21 | max_forks_repo_head_hexsha | string |
| 22 | max_forks_repo_licenses | list |
| 23 | max_forks_count | int64 |
| 24 | max_forks_repo_forks_event_min_datetime | string |
| 25 | max_forks_repo_forks_event_max_datetime | string |
| 26 | content | string |
| 27 | avg_line_length | float64 |
| 28 | max_line_length | int64 |
| 29 | alphanum_fraction | float64 |
| 30 | qsc_code_num_words_quality_signal | int64 |
| 31 | qsc_code_num_chars_quality_signal | float64 |
| 32 | qsc_code_mean_word_length_quality_signal | float64 |
| 33 | qsc_code_frac_words_unique_quality_signal | float64 |
| 34 | qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| 35 | qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| 36 | qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| 37 | qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| 38 | qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| 39 | qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| 40 | qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| 41 | qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| 42 | qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| 43 | qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| 44 | qsc_code_frac_chars_digital_quality_signal | float64 |
| 45 | qsc_code_frac_chars_whitespace_quality_signal | float64 |
| 46 | qsc_code_size_file_byte_quality_signal | float64 |
| 47 | qsc_code_num_lines_quality_signal | float64 |
| 48 | qsc_code_num_chars_line_max_quality_signal | float64 |
| 49 | qsc_code_num_chars_line_mean_quality_signal | float64 |
| 50 | qsc_code_frac_chars_alphabet_quality_signal | float64 |
| 51 | qsc_code_frac_chars_comments_quality_signal | float64 |
| 52 | qsc_code_cate_xml_start_quality_signal | float64 |
| 53 | qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| 54 | qsc_code_cate_autogen_quality_signal | float64 |
| 55 | qsc_code_frac_lines_long_string_quality_signal | float64 |
| 56 | qsc_code_frac_chars_string_length_quality_signal | float64 |
| 57 | qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| 58 | qsc_code_frac_lines_string_concat_quality_signal | float64 |
| 59 | qsc_code_cate_encoded_data_quality_signal | float64 |
| 60 | qsc_code_frac_chars_hex_words_quality_signal | float64 |
| 61 | qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| 62 | qsc_code_frac_lines_assert_quality_signal | float64 |
| 63 | qsc_codepython_cate_ast_quality_signal | float64 |
| 64 | qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| 65 | qsc_codepython_cate_var_zero_quality_signal | bool |
| 66 | qsc_codepython_frac_lines_pass_quality_signal | float64 |
| 67 | qsc_codepython_frac_lines_import_quality_signal | float64 |
| 68 | qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| 69 | qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| 70 | qsc_codepython_frac_lines_print_quality_signal | float64 |
| 71 | qsc_code_num_words | int64 |
| 72 | qsc_code_num_chars | int64 |
| 73 | qsc_code_mean_word_length | int64 |
| 74 | qsc_code_frac_words_unique | null |
| 75 | qsc_code_frac_chars_top_2grams | int64 |
| 76 | qsc_code_frac_chars_top_3grams | int64 |
| 77 | qsc_code_frac_chars_top_4grams | int64 |
| 78 | qsc_code_frac_chars_dupe_5grams | int64 |
| 79 | qsc_code_frac_chars_dupe_6grams | int64 |
| 80 | qsc_code_frac_chars_dupe_7grams | int64 |
| 81 | qsc_code_frac_chars_dupe_8grams | int64 |
| 82 | qsc_code_frac_chars_dupe_9grams | int64 |
| 83 | qsc_code_frac_chars_dupe_10grams | int64 |
| 84 | qsc_code_frac_chars_replacement_symbols | int64 |
| 85 | qsc_code_frac_chars_digital | int64 |
| 86 | qsc_code_frac_chars_whitespace | int64 |
| 87 | qsc_code_size_file_byte | int64 |
| 88 | qsc_code_num_lines | int64 |
| 89 | qsc_code_num_chars_line_max | int64 |
| 90 | qsc_code_num_chars_line_mean | int64 |
| 91 | qsc_code_frac_chars_alphabet | int64 |
| 92 | qsc_code_frac_chars_comments | int64 |
| 93 | qsc_code_cate_xml_start | int64 |
| 94 | qsc_code_frac_lines_dupe_lines | int64 |
| 95 | qsc_code_cate_autogen | int64 |
| 96 | qsc_code_frac_lines_long_string | int64 |
| 97 | qsc_code_frac_chars_string_length | int64 |
| 98 | qsc_code_frac_chars_long_word_length | int64 |
| 99 | qsc_code_frac_lines_string_concat | null |
| 100 | qsc_code_cate_encoded_data | int64 |
| 101 | qsc_code_frac_chars_hex_words | int64 |
| 102 | qsc_code_frac_lines_prompt_comments | int64 |
| 103 | qsc_code_frac_lines_assert | int64 |
| 104 | qsc_codepython_cate_ast | int64 |
| 105 | qsc_codepython_frac_lines_func_ratio | int64 |
| 106 | qsc_codepython_cate_var_zero | int64 |
| 107 | qsc_codepython_frac_lines_pass | int64 |
| 108 | qsc_codepython_frac_lines_simplefunc is preceded by qsc_codepython_frac_lines_import | int64 |
| 109 | qsc_codepython_frac_lines_simplefunc | int64 |
| 110 | qsc_codepython_score_lines_no_logic | int64 |
| 111 | qsc_codepython_frac_lines_print | int64 |
| 112 | effective | string |
| 113 | hits | int64 |

Each row below is rendered as one record block; `content` appears as a fenced code block, and the two quality-signal vectors are listed in the schema order above (columns 30–70, then columns 71–111).
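As a minimal sketch of how a table with this schema can be inspected (assuming the rows are available as a parquet export — the file name below is hypothetical), the signal columns can be used directly as filters; all column names are taken from the schema above:

```python
# Minimal sketch, assuming this table exists as a parquet export.
# "code_quality_signals.parquet" is a hypothetical file name; substitute the
# actual export of this dataset.
import pandas as pd

df = pd.read_parquet("code_quality_signals.parquet")

# Keep Python files that parse (cate_ast == 1) and are not dominated by
# duplicated 10-grams.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "hits"]].head())
```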
**Row 1**

- hexsha `5ebb91cb5e4e0255ed257a4d8c5da3f46959b083` · size 1416 · ext py · lang Python
- max_stars: repo musen142/py-apple-dynamics, head `95f831ecf9c9167e9709c63deabc989eda6bf669`, path `Py Apple Dynamics V7.3 SRC/PA-Dynamics V7.3/PA_ATTITUDE.py`, licenses ["Apache-2.0"], count 1, events 2022-01-18T11:47:29.000Z → 2022-01-18T11:47:29.000Z
- max_issues: same repo, head, path, and licenses; count null, events null → null
- max_forks: same repo, head, path, and licenses; count null, events null → null
- avg_line_length 32.930233 · max_line_length 109 · alphanum_fraction 0.47952

content:

```python
from math import sin, cos, pi

def cal_ges(PIT, ROL, l, b, w, x, Hc):
    YA = 0
    P = PIT*pi/180
    R = ROL*pi/180
    Y = YA*pi/180
    # Leg 1
    ABl_x = l/2 - x - (l*cos(P)*cos(Y))/2 + (b*cos(P)*sin(Y))/2
    ABl_y = w/2 - (b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 - (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
    ABl_z = -Hc - (b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - (l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2
    # Leg 2
    AB2_x = l/2 - x - (l*cos(P)*cos(Y))/2 - (b*cos(P)*sin(Y))/2
    AB2_y = (b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 - w/2 - (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
    AB2_z = (b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - Hc - (l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2
    # Leg 3
    AB3_x = (l*cos(P)*cos(Y))/2 - x - l/2 + (b*cos(P)*sin(Y))/2
    AB3_y = w/2 - (b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 + (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
    AB3_z = (l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2 - (b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - Hc
    # Leg 4
    AB4_x = (l*cos(P)*cos(Y))/2 - x - l/2 - (b*cos(P)*sin(Y))/2
    AB4_y = (b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 - w/2 + (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
    AB4_z = (b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - Hc + (l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2
    x1 = ABl_x
    y1 = ABl_z
    x2 = AB2_x
    y2 = AB2_z
    x3 = AB4_x
    y3 = AB4_z
    x4 = AB3_x
    y4 = AB3_z
    return x1, x2, x3, x4, y1, y2, y3, y4
```

- quality signals (cols 30–70): 355, 1416, 1.853521, 0.115493, 0.121581, 0.170213, 0.145897, 0.711246, 0.711246, 0.711246, 0.711246, 0.711246, 0.711246, 0, 0.066379, 0.180791, 1416, 42, 110, 33.714286, 0.500862, 0.00565, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.037037, false, 0, 0.037037, 0, 0.111111, 0
- raw signals (cols 71–111): 0, 0, 1, null, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective "0" · hits 6
**Row 2**

- hexsha `0d6498c824c3657f7aeb953f0131a759eba8a2fa` · size 68 · ext py · lang Python
- max_stars: repo DiDigsDNA/cpo-pipeline, head `4b3236ef4fe37e6efa38554e90f6d289d4f1f801`, path `cpo_pipeline/typing/__init__.py`, licenses ["MIT"], count null, events null → null
- max_issues: same repo, head, path, and licenses; count 31, events 2018-10-11T17:43:19.000Z → 2019-06-14T19:26:26.000Z
- max_forks: same repo, head, path, and licenses; count 3, events 2018-11-15T18:04:36.000Z → 2019-05-02T19:09:39.000Z
- avg_line_length 9.714286 · max_line_length 22 · alphanum_fraction 0.691176

content:

```python
"""
typing module
"""
from . import pipeline
from . import parsers
```

- quality signals (cols 30–70): 8, 68, 5.875, 0.75, 0.425532, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.191176, 68, 6, 23, 11.333333, 0.854545, 0.191176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
- raw signals (cols 71–111): 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective "0" · hits 6
**Row 3**

- hexsha `0d81c066a40ec6d9a8fc2da5c8835fbc0f207108` · size 6871 · ext py · lang Python
- max_stars: repo guoxiaowhu/lenstronomy, head `dcdfc61ce5351ac94565228c822f1c94392c1ad6`, path `test/test_PointSource/test_point_source.py`, licenses ["MIT"], count 1, events 2018-11-08T12:33:26.000Z → 2018-11-08T12:33:26.000Z
- max_issues: same repo, head, path, and licenses; count null, events null → null
- max_forks: same repo, head, path, and licenses; count null, events null → null
- avg_line_length 54.531746 · max_line_length 140 · alphanum_fraction 0.679377

content:

```python
import pytest
import numpy as np
import numpy.testing as npt
from lenstronomy.PointSource.point_source import PointSource
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
import lenstronomy.Util.param_util as param_util


class TestPointSource(object):

    def setup(self):
        lensModel = LensModel(lens_model_list=['SPEP'])
        solver = LensEquationSolver(lensModel=lensModel)
        e1, e2 = param_util.phi_q2_ellipticity(0, 0.7)
        self.kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2, 'gamma': 2}]
        self.sourcePos_x, self.sourcePos_y = 0.01, -0.01
        self.x_pos, self.y_pos = solver.image_position_from_source(sourcePos_x=self.sourcePos_x,
                                                                   sourcePos_y=self.sourcePos_y, kwargs_lens=self.kwargs_lens)
        self.PointSource = PointSource(point_source_type_list=['LENSED_POSITION', 'UNLENSED', 'SOURCE_POSITION'],
                                       lensModel=lensModel, fixed_magnification_list=[False]*4, additional_images_list=[False]*4)
        self.kwargs_ps = [{'ra_image': self.x_pos, 'dec_image': self.y_pos, 'point_amp': np.ones_like(self.x_pos)},
                          {'ra_image': [1.], 'dec_image': [1.], 'point_amp': [10]},
                          {'ra_source': self.sourcePos_x, 'dec_source': self.sourcePos_y, 'point_amp': np.ones_like(self.x_pos)}, {}]

    def test_image_position(self):
        x_image_list, y_image_list = self.PointSource.image_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens)
        npt.assert_almost_equal(x_image_list[0][0], self.x_pos[0], decimal=8)
        npt.assert_almost_equal(x_image_list[1], 1, decimal=8)
        npt.assert_almost_equal(x_image_list[2][0], self.x_pos[0], decimal=8)

    def test_source_position(self):
        x_source_list, y_source_list = self.PointSource.source_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens)
        npt.assert_almost_equal(x_source_list[0], self.sourcePos_x, decimal=8)
        npt.assert_almost_equal(x_source_list[1], 1, decimal=8)
        npt.assert_almost_equal(x_source_list[2], self.sourcePos_x, decimal=8)

    def test_num_basis(self):
        num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens)
        assert num_basis == 9

    def test_linear_response_set(self):
        ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set(self.kwargs_ps, kwargs_lens=self.kwargs_lens, with_amp=False, k=None)
        num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens)
        assert n == num_basis
        assert ra_pos[0][0] == self.x_pos[0]

    def test_point_source_list(self):
        ra_list, dec_list, amp_list = self.PointSource.point_source_list(self.kwargs_ps, self.kwargs_lens)
        assert ra_list[0] == self.x_pos[0]
        assert len(ra_list) == 9

    def test_point_source_amplitude(self):
        amp_list = self.PointSource.source_amplitude(self.kwargs_ps, self.kwargs_lens)
        assert len(amp_list) == 3

    def test_set_save_cache(self):
        self.PointSource.set_save_cache(True)
        assert self.PointSource._point_source_list[0]._save_cache == True
        self.PointSource.set_save_cache(False)
        assert self.PointSource._point_source_list[0]._save_cache == False

    def test_update_lens_model(self):
        lensModel = LensModel(lens_model_list=['SIS'])
        self.PointSource.update_lens_model(lens_model_class=lensModel)
        kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}]
        x_image_list, y_image_list = self.PointSource.image_position(kwargs_ps=self.kwargs_ps,
                                                                     kwargs_lens=kwargs_lens)
        npt.assert_almost_equal(x_image_list[0][0], -0.82654997748011705, decimal=8)


class TestPointSource_fixed_mag(object):

    def setup(self):
        lensModel = LensModel(lens_model_list=['SPEP'])
        solver = LensEquationSolver(lensModel=lensModel)
        e1, e2 = param_util.phi_q2_ellipticity(0, 0.7)
        self.kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2, 'gamma': 2}]
        self.sourcePos_x, self.sourcePos_y = 0.01, -0.01
        self.x_pos, self.y_pos = solver.image_position_from_source(sourcePos_x=self.sourcePos_x,
                                                                   sourcePos_y=self.sourcePos_y, kwargs_lens=self.kwargs_lens)
        self.PointSource = PointSource(point_source_type_list=['LENSED_POSITION', 'UNLENSED', 'SOURCE_POSITION'],
                                       lensModel=lensModel, fixed_magnification_list=[True]*4, additional_images_list=[False]*4)
        self.kwargs_ps = [{'ra_image': self.x_pos, 'dec_image': self.y_pos, 'source_amp': 1},
                          {'ra_image': [1.], 'dec_image': [1.], 'point_amp': [10]},
                          {'ra_source': self.sourcePos_x, 'dec_source': self.sourcePos_y, 'source_amp': 1.}, {}]

    def test_image_position(self):
        x_image_list, y_image_list = self.PointSource.image_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens)
        npt.assert_almost_equal(x_image_list[0][0], self.x_pos[0], decimal=8)
        npt.assert_almost_equal(x_image_list[1], 1, decimal=8)
        npt.assert_almost_equal(x_image_list[2][0], self.x_pos[0], decimal=8)

    def test_source_position(self):
        x_source_list, y_source_list = self.PointSource.source_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens)
        npt.assert_almost_equal(x_source_list[0], self.sourcePos_x, decimal=8)
        npt.assert_almost_equal(x_source_list[1], 1, decimal=8)
        npt.assert_almost_equal(x_source_list[2], self.sourcePos_x, decimal=8)

    def test_num_basis(self):
        num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens)
        assert num_basis == 3

    def test_linear_response_set(self):
        ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set(self.kwargs_ps, kwargs_lens=self.kwargs_lens, with_amp=False, k=None)
        num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens)
        assert n == num_basis
        assert ra_pos[0][0] == self.x_pos[0]
        assert ra_pos[1][0] == 1
        npt.assert_almost_equal(ra_pos[2][0], self.x_pos[0], decimal=8)

    def test_point_source_list(self):
        ra_list, dec_list, amp_list = self.PointSource.point_source_list(self.kwargs_ps, self.kwargs_lens)
        assert ra_list[0] == self.x_pos[0]
        assert len(ra_list) == 9

    def test_check_image_positions(self):
        bool = self.PointSource.check_image_positions(self.kwargs_ps, self.kwargs_lens, tolerance=0.001)
        assert bool == True


if __name__ == '__main__':
    pytest.main()
```

- quality signals (cols 30–70): 992, 6871, 4.353831, 0.099798, 0.081037, 0.058347, 0.06483, 0.837462, 0.824959, 0.809447, 0.802038, 0.789998, 0.768696, 0, 0.025912, 0.202445, 6871, 125, 141, 54.968, 0.762226, 0, 0, 0.613861, 0, 0, 0.050065, 0, 0, 0, 0, 0, 0.287129, 1, 0.158416, false, 0, 0.069307, 0, 0.247525, 0
- raw signals (cols 71–111): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective "0" · hits 6
**Row 4**

- hexsha `0d8360504869ccd639eced6db8d480f95abb8776` · size 37 · ext py · lang Python
- max_stars: repo wheatdog/guildai, head `817cf179d0b6910d3d4fca522045a8139aef6c9e`, path `examples/project-sourcecode/c.py`, licenses ["Apache-2.0"], count 694, events 2018-11-30T01:06:30.000Z → 2022-03-31T14:46:26.000Z
- max_issues: same repo, head, path, and licenses; count 323, events 2018-11-05T17:44:34.000Z → 2022-03-31T16:56:41.000Z
- max_forks: same repo, head, path, and licenses; count 68, events 2019-04-01T04:24:47.000Z → 2022-02-24T17:22:04.000Z
- avg_line_length 9.25 · max_line_length 24 · alphanum_fraction 0.72973

content:

```python
from subproject import d

print("c")
```

- quality signals (cols 30–70): 6, 37, 4.5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.162162, 37, 3, 25, 12.333333, 0.870968, 0, 0, 0, 0, 0, 0.027027, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.5, 0, 0.5, 0.5
- raw signals (cols 71–111): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1
- effective "0" · hits 6
**Row 5**

- hexsha `0dc14be135ed1c10113f4ab5fbd2759629e41d4e` · size 192 · ext py · lang Python
- max_stars: repo jcgurango/library_management, head `f9859499eb12414889277fbdadfcd60290c320dd`, path `library_management/library_management/doctype/customer_account/customer_account.py`, licenses ["MIT"], count null, events null → null
- max_issues: same repo, head, path, and licenses; count null, events null → null
- max_forks: same repo, head, path, and licenses; count null, events null → null
- avg_line_length 21.333333 · max_line_length 49 · alphanum_fraction 0.791667

content:

```python
# Copyright (c) 2021, JC and contributors
# For license information, please see license.txt

# import frappe
from frappe.model.document import Document


class CustomerAccount(Document):
    pass
```

- quality signals (cols 30–70): 25, 192, 6.08, 0.8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024242, 0.140625, 192, 8, 50, 24, 0.89697, 0.526042, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0.333333, 0.333333, 0, 0.666667, 0
- raw signals (cols 71–111): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0
- effective "0" · hits 6
**Row 6**

- hexsha `21995407ba2e718df75670c68ae5c377959bb276` · size 96 · ext py · lang Python
- max_stars: repo Retraces/UkraineBot, head `3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71`, path `venv/lib/python3.8/site-packages/virtualenv/create/via_global_ref/builtin/cpython/cpython3.py`, licenses ["MIT"], count 2, events 2022-03-13T01:58:52.000Z → 2022-03-31T06:07:54.000Z
- max_issues: repo DesmoSearch/Desmobot, head `b70b45df3485351f471080deb5c785c4bc5c4beb`, same path, licenses ["MIT"], count 19, events 2021-11-20T04:09:18.000Z → 2022-03-23T15:05:55.000Z
- max_forks: same repo, head, path, and licenses as max_issues; count null, events null → null
- avg_line_length 96 · max_line_length 96 · alphanum_fraction 0.895833

content (the stored file is a single path string, not Python source):

```
/home/runner/.cache/pip/pool/8f/3e/26/6ee86ef4171b7194b098a053f1e488bca8ba920931fd5f9fb809ad9a37
```

- quality signals (cols 30–70): 9, 96, 9.555556, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.416667, 0, 96, 1, 96, 96, 0.479167, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, null, null, 0, 0, null, null, 0
- raw signals (cols 71–111): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, null, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- effective "0" · hits 6
**Row 7**

- hexsha `219f1d9b10fd7858f91ccf44c96ea3fd2cc531d1` · size 4874 · ext py · lang Python
- max_stars: repo MedicalVisionGroup/interlacer, head `60c14782729031a2af48c27fddb649d37cdca0e9`, path `interlacer/utils.py`, licenses ["MIT"], count null, events null → null
- max_issues: same repo, head, path, and licenses; count null, events null → null
- max_forks: same repo, head, path, and licenses; count null, events null → null
- avg_line_length 29.719512 · max_line_length 147 · alphanum_fraction 0.603816

content:

```python
import numpy as np
import tensorflow as tf


def split_reim(array):
    """Split a complex valued matrix into its real and imaginary parts.
    Args:
        array(complex): An array of shape (batch_size, N, N) or (batch_size, N, N, 1)
    Returns:
        split_array(float): An array of shape (batch_size, N, N, 2) containing the real part on one channel and the imaginary part on another channel
    """
    real = np.real(array)
    imag = np.imag(array)
    split_array = np.stack((real, imag), axis=3)
    return split_array


def split_reim_tensor(array):
    """Split a complex valued tensor into its real and imaginary parts.
    Args:
        array(complex): A tensor of shape (batch_size, N, N) or (batch_size, N, N, 1)
    Returns:
        split_array(float): A tensor of shape (batch_size, N, N, 2) containing the real part on one channel and the imaginary part on another channel
    """
    real = tf.math.real(array)
    imag = tf.math.imag(array)
    split_array = tf.stack((real, imag), axis=3)
    return split_array


def split_reim_channels(array):
    """Split a complex valued tensor into its real and imaginary parts.
    Args:
        array(complex): A tensor of shape (batch_size, N, N) or (batch_size, N, N, 1)
    Returns:
        split_array(float): A tensor of shape (batch_size, N, N, 2) containing the real part on one channel and the imaginary part on another channel
    """
    real = tf.math.real(array)
    imag = tf.math.imag(array)
    n_ch = array.get_shape().as_list()[3]
    split_array = tf.concat((real, imag), axis=3)
    return split_array


def join_reim(array):
    """Join the real and imaginary channels of a matrix to a single complex-valued matrix.
    Args:
        array(float): An array of shape (batch_size, N, N, 2)
    Returns:
        joined_array(complex): A complex-valued array of shape (batch_size, N, N, 1)
    """
    joined_array = array[:, :, :, 0] + 1j * array[:, :, :, 1]
    return joined_array


def join_reim_tensor(array):
    """Join the real and imaginary channels of a matrix to a single complex-valued matrix.
    Args:
        array(float): An array of shape (batch_size, N, N, 2)
    Returns:
        joined_array(complex): A complex-valued array of shape (batch_size, N, N)
    """
    joined_array = tf.cast(array[:, :, :, 0], 'complex64') + \
        1j * tf.cast(array[:, :, :, 1], 'complex64')
    return joined_array


def join_reim_channels(array):
    """Join the real and imaginary channels of a matrix to a single complex-valued matrix.
    Args:
        array(float): An array of shape (batch_size, N, N, ch)
    Returns:
        joined_array(complex): A complex-valued array of shape (batch_size, N, N, ch/2)
    """
    ch = array.get_shape().as_list()[3]
    joined_array = tf.cast(array[:, :, :, :int(ch / 2)], dtype=tf.complex64) + \
        1j * tf.cast(array[:, :, :, int(ch / 2):], dtype=tf.complex64)
    return joined_array


def convert_to_frequency_domain(images):
    """Convert an array of images to their Fourier transforms.
    Args:
        images(float): An array of shape (batch_size, N, N, 2)
    Returns:
        spectra(float): An FFT-ed array of shape (batch_size, N, N, 2)
    """
    n = images.shape[1]
    spectra = split_reim(np.fft.fft2(join_reim(images), axes=(1, 2)))
    return spectra


def convert_tensor_to_frequency_domain(images):
    """Convert a tensor of images to their Fourier transforms.
    Args:
        images(float): A tensor of shape (batch_size, N, N, 2)
    Returns:
        spectra(float): An FFT-ed tensor of shape (batch_size, N, N, 2)
    """
    n = images.shape[1]
    spectra = split_reim_tensor(tf.signal.fft2d(join_reim_tensor(images)))
    return spectra


def convert_to_image_domain(spectra):
    """Convert an array of Fourier spectra to the corresponding images.
    Args:
        spectra(float): An array of shape (batch_size, N, N, 2)
    Returns:
        images(float): An IFFT-ed array of shape (batch_size, N, N, 2)
    """
    n = spectra.shape[1]
    images = split_reim(np.fft.ifft2(join_reim(spectra), axes=(1, 2)))
    return images


def convert_tensor_to_image_domain(spectra):
    """Convert an array of Fourier spectra to the corresponding images.
    Args:
        spectra(float): An array of shape (batch_size, N, N, 2)
    Returns:
        images(float): An IFFT-ed array of shape (batch_size, N, N, 2)
    """
    n = spectra.shape[1]
    images = split_reim_tensor(tf.signal.ifft2d(join_reim_tensor(spectra)))
    return images
```

- quality signals (cols 30–70): 691, 4874, 4.140376, 0.115774, 0.072352, 0.080391, 0.088431, 0.855645, 0.786089, 0.766515, 0.751136, 0.734009, 0.653618, 0, 0.014986, 0.288059, 4874, 163, 148, 29.90184, 0.80951, 0.498153, 0, 0.415094, 0, 0, 0.008276, 0, 0, 0, 0, 0, 0, 1, 0.188679, false, 0, 0.037736, 0, 0.415094, 0
- raw signals (cols 71–111): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective "0" · hits 6
**Row 8**

- hexsha `df2cb555b3dc2db771abca035af0535436996ced` · size 47 · ext py · lang Python
- max_stars: repo ilya-mezentsev/multi-parser, head `2d418f38a102fdad826912d4335242a269a26602`, path `multi_parser/shared/__init__.py`, licenses ["MIT"], count 14, events 2020-08-09T06:12:06.000Z → 2022-03-10T13:16:57.000Z
- max_issues: same repo, head, path, and licenses; count 14, events 2020-08-05T06:18:30.000Z → 2021-12-13T21:19:38.000Z
- max_forks: repo defineimpossible/django-rest-batteries, head `d83dc67b6e91ae1a9c7625606a66b59d83936947`, path `example/store/serializers/__init__.py`, licenses ["MIT"], count null, events null → null
- avg_line_length 15.666667 · max_line_length 23 · alphanum_fraction 0.744681

content:

```python
from .request import *
from .response import *
```

- quality signals (cols 30–70): 6, 47, 5.833333, 0.666667, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.170213, 47, 2, 24, 23.5, 0.897436, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
- raw signals (cols 71–111): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective "0" · hits 6
**Row 9**

- hexsha `df5d5ba2560d1eb8c0481b8f8f3df57ed776d13a` · size 83 · ext py · lang Python
- max_stars: repo dineshks1/dissononce, head `154297aba0e9fdedad9279278f748bd8e4f790c6`, path `dissononce/dh/x448/private.py`, licenses ["MIT"], count 34, events 2019-04-18T03:35:51.000Z → 2022-03-20T13:35:04.000Z
- max_issues: same repo, head, path, and licenses; count 2, events 2019-04-24T06:42:33.000Z → 2019-07-17T19:40:40.000Z
- max_forks: same repo, head, path, and licenses; count 16, events 2019-05-02T08:29:17.000Z → 2021-12-06T22:50:37.000Z
- avg_line_length 13.833333 · max_line_length 37 · alphanum_fraction 0.783133

content:

```python
from dissononce.dh import private


class PrivateKey(private.PrivateKey):
    pass
```

- quality signals (cols 30–70): 10, 83, 6.5, 0.8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.156627, 83, 5, 38, 16.6, 0.928571, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0.333333, 0.333333, 0, 0.666667, 0
- raw signals (cols 71–111): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0
- effective "0" · hits 6
**Row 10**

- hexsha `80083e1dfe6103dbfacdadbdcb511c7186bad38a` · size 26 · ext py · lang Python
- max_stars: repo manuerux/django-password-policies-iplweb, head `5bab0277671fb8c853cec9c8aad64d92195030e9`, path `password_policies/tests/__init__.py`, licenses ["BSD-3-Clause"], count 5, events 2018-06-21T14:18:56.000Z → 2021-07-08T17:50:02.000Z
- max_issues: same repo, head, path, and licenses; count 20, events 2018-01-25T22:01:25.000Z → 2022-03-15T13:26:47.000Z
- max_forks: same repo, head, path, and licenses; count 19, events 2018-01-25T21:04:09.000Z → 2022-03-01T11:26:35.000Z
- avg_line_length 13 · max_line_length 25 · alphanum_fraction 0.730769

content:

```python
from ..receivers import *
```

- quality signals (cols 30–70): 3, 26, 6.333333, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.153846, 26, 1, 26, 26, 0.863636, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
- raw signals (cols 71–111): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective "0" · hits 6
**Row 11**

- hexsha `803369c9001e4847c771fed5ca6b7aaff0451aac` · size 2832 · ext py · lang Python
- max_stars: repo GUI/REopt_Lite_API, head `f2ade81b67c526cbe778c7bc584e3e1d616c1efc`, path `reo/migrations/0064_auto_20200616_1708.py`, licenses ["BSD-3-Clause"], count 41, events 2020-02-21T08:25:17.000Z → 2022-01-14T23:06:42.000Z
- max_issues: same repo, head, path, and licenses; count 167, events 2020-02-17T17:26:47.000Z → 2022-01-20T20:36:54.000Z
- max_forks: same repo, head, path, and licenses; count 31, events 2020-02-20T00:22:51.000Z → 2021-12-10T05:48:08.000Z
- avg_line_length 35.4 · max_line_length 137 · alphanum_fraction 0.607345

content:

```python
# Generated by Django 2.2.10 on 2020-06-16 17:08

import django.contrib.postgres.fields
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('reo', '0063_auto_20200521_1528'),
    ]

    operations = [
        migrations.AddField(
            model_name='profilemodel',
            name='julia_input_construction_seconds',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_input_construction_seconds_bau',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_reopt_constriants_seconds',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_reopt_constriants_seconds_bau',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_reopt_optimize_seconds',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_reopt_optimize_seconds_bau',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_reopt_postprocess_seconds',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_reopt_postprocess_seconds_bau',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_reopt_preamble_seconds',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_reopt_preamble_seconds_bau',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_reopt_variables_seconds',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profilemodel',
            name='julia_reopt_variables_seconds_bau',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='loadprofilemodel',
            name='doe_reference_name',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(blank=True, null=True), default=list, size=None),
        ),
    ]
```

- quality signals (cols 30–70): 271, 2832, 6.121771, 0.217712, 0.070524, 0.101869, 0.133213, 0.796866, 0.796866, 0.796866, 0.796866, 0.796866, 0.796866, 0, 0.015842, 0.286723, 2832, 79, 138, 35.848101, 0.805446, 0.016243, 0, 0.671233, 1, 0, 0.210489, 0.145474, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.027397, 0, 0.068493, 0
- raw signals (cols 71–111): 0, 0, 0, null, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective "0" · hits 6
**Row 12**

- hexsha `33a43569f2dc889b1051e353b42c5978e08a2be2` · size 35 · ext py · lang Python
- max_stars: repo lwalter/flask-angular-starter, head `31d5468777d429701c8ae0e790458a980fee6837`, path `app/__init__.py`, licenses ["MIT"], count 13, events 2016-03-24T03:12:05.000Z → 2021-03-15T14:58:36.000Z
- max_issues: same repo, head, path, and licenses; count 7, events 2016-03-24T03:20:05.000Z → 2017-07-19T03:06:13.000Z
- max_forks: same repo, head, path, and licenses; count 4, events 2017-06-22T05:52:08.000Z → 2022-02-25T15:25:57.000Z
- avg_line_length 17.5 · max_line_length 34 · alphanum_fraction 0.857143

content:

```python
from app.factory import create_app
```

- quality signals (cols 30–70): 6, 35, 4.833333, 0.833333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.114286, 35, 1, 35, 35, 0.935484, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
- raw signals (cols 71–111): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective "0" · hits 6
**Row 13**

- hexsha `33a790e4cc50cc09ac9352f7aad0bd45b97fcd11` · size 84 · ext py · lang Python
- max_stars: repo sanderland/katago-server, head `6414fab080d007c05068a06ff4f25907b92848bd`, path `src/apps/users/forms/__init__.py`, licenses ["MIT"], count 27, events 2020-05-03T11:01:27.000Z → 2022-03-17T05:33:10.000Z
- max_issues: same repo, head, path, and licenses; count 54, events 2020-05-09T01:18:41.000Z → 2022-01-22T10:31:15.000Z
- max_forks: same repo, head, path, and licenses; count 9, events 2020-09-29T11:31:32.000Z → 2022-03-09T01:37:50.000Z
- avg_line_length 28 · max_line_length 43 · alphanum_fraction 0.880952

content:

```python
from .user_change import UserChangeForm
from .user_creation import UserCreationForm
```

- quality signals (cols 30–70): 10, 84, 7.2, 0.7, 0.222222, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.095238, 84, 2, 44, 42, 0.947368, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
- raw signals (cols 71–111): 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective "0" · hits 6
**Row 14**

- hexsha `33ac31110619eeecf53e6d049d77405ba061c204` · size 341 · ext py · lang Python
- max_stars: repo PRITI1999/OneLineWonders, head `91a7368e0796e5a3b5839c9165f9fbe5460879f5`, path `CA117/Lab_5/numcomps_32.py`, licenses ["MIT"], count 6, events 2016-02-04T00:15:20.000Z → 2019-10-13T13:53:16.000Z
- max_issues: same repo, head, path, and licenses; count 2, events 2016-03-14T04:01:36.000Z → 2019-10-16T12:45:34.000Z
- max_forks: same repo, head, path, and licenses; count 10, events 2016-02-09T14:38:32.000Z → 2021-05-25T08:16:26.000Z
- avg_line_length 170.5 · max_line_length 340 · alphanum_fraction 0.609971

content (a deliberate one-liner; kept verbatim):

```python
print(("Multiples of {}: {}\n"*6).format("3",[n for n in range(1,31)if not n%3],"3 squared",[n**2for n in range(1,31)if not n%3],"4 doubled",[n*2for n in range(1,31)if not n%4],"3 or 4",[n for n in range(1,31)if not(n%4and n%3)],"3 and 4",[n for n in range(1,31)if not(n%4or n%3)],"3 replaced",[n%3and n or'X'for n in range(1,31)]).strip())
```

- quality signals (cols 30–70): 90, 341, 2.311111, 0.288889, 0.086538, 0.230769, 0.259615, 0.600962, 0.600962, 0.533654, 0.533654, 0.533654, 0.524038, 0, 0.124579, 0.129032, 341, 1, 341, 341, 0.575758, 0, 0, 0, 0, 0, 0.187683, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0, 0, 0, 1
- raw signals (cols 71–111): 0, 0, 0, null, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1
- effective "0" · hits 6
**Row 15**

- hexsha `33ad3f8d85b0ae17f8d1f68deb1a77ffc336a163` · size 155 · ext py · lang Python
- max_stars: repo paradoxxxzero/nocolon, head `80bffe09e200b148cd836fd8289c59f2cd33719b`, path `nocolon_main.py`, licenses ["BSD-3-Clause"], count 73, events 2015-05-08T09:22:03.000Z → 2021-05-20T15:17:18.000Z
- max_issues: same repo, head, path, and licenses; count 3, events 2017-05-12T20:57:10.000Z → 2017-05-15T10:10:30.000Z
- max_forks: same repo, head, path, and licenses; count 5, events 2016-10-21T09:29:39.000Z → 2017-11-15T19:16:29.000Z
- avg_line_length 19.375 · max_line_length 53 · alphanum_fraction 0.819355

content:

```python
# Import the encoding
import nocolon

# Now you can import files with the nocolon encoding:
from nocolon_test import nocolon_function

nocolon_function(4)
```

- quality signals (cols 30–70): 23, 155, 5.391304, 0.565217, 0.209677, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.007576, 0.148387, 155, 7, 54, 22.142857, 0.931818, 0.458065, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.666667, 0, 0.666667, 0
- raw signals (cols 71–111): 0, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective "0" · hits 6
**Row 16**

- hexsha `33aefac32f09c23801a6116bba41fc7dfac6eba4` · size 789 · ext py · lang Python
- max_stars: repo johnbartholomew/bookwyrm, head `a6593eced7db88f0a68bd19a0e6ba441bf1053c3`, path `fedireads/activitypub/__init__.py`, licenses ["CC0-1.0"], count null, events null → null
- max_issues: same repo, head, path, and licenses; count null, events null → null
- max_forks: same repo, head, path, and licenses; count null, events null → null
- avg_line_length 46.411765 · max_line_length 76 · alphanum_fraction 0.844106

content:

```python
''' bring activitypub functions into the namespace '''
from .actor import get_actor
from .book import get_book, get_author, get_shelf
from .create import get_create, get_update
from .follow import get_following, get_followers
from .follow import get_follow_request, get_unfollow, get_accept, get_reject
from .outbox import get_outbox, get_outbox_page
from .shelve import get_add, get_remove
from .status import get_review, get_review_article
from .status import get_rating, get_rating_note
from .status import get_comment, get_comment_article
from .status import get_quotation, get_quotation_article
from .status import get_status, get_replies, get_replies_page
from .status import get_favorite, get_unfavorite
from .status import get_boost
from .status import get_add_tag, get_remove_tag
```

- quality signals (cols 30–70): 124, 789, 5.040323, 0.314516, 0.216, 0.2048, 0.2432, 0.1248, 0, 0, 0, 0, 0, 0, 0, 0.107731, 789, 16, 77, 49.3125, 0.887784, 0.058302, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
- raw signals (cols 71–111): 0, 0, 0, null, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective "0" · hits 6
**Row 17**

- hexsha `33f7daec8520bf61c9a9ff557667fd5b5759236d` · size 2598 · ext py · lang Python
- max_stars: repo joschout/Multi-Directional-Rule-Set-Learning, head `ef0620b115f4e0fd7fba3e752d238a8020c1ca6b`, path `experiments/e2_multi_directional_model_comparison/file_naming/rules/single_target_tree_rule_naming.py`, licenses ["Apache-2.0"], count 3, events 2020-08-03T19:25:44.000Z → 2021-06-27T22:25:55.000Z
- max_issues: same repo, head, path, and licenses; count null, events null → null
- max_forks: same repo, head, path, and licenses; count 2, events 2020-08-07T22:54:28.000Z → 2021-02-18T06:11:01.000Z
- avg_line_length 36.591549 · max_line_length 102 · alphanum_fraction 0.734411

content:

```python
import os

from experiments.file_naming.single_target_classifier_indicator import SingleTargetClassifierIndicator
from project_info import project_dir


def get_single_target_tree_rule_dir() -> str:
    mcars_dir: str = os.path.join(project_dir,
                                  'models',
                                  'single_target_tree_rules')
    if not os.path.exists(mcars_dir):
        os.makedirs(mcars_dir)
    return mcars_dir


def get_single_target_tree_rules_relative_file_name_without_extension(
        dataset_name: str, fold_i: int,
        target_attribute: str,
        classifier_indicator: SingleTargetClassifierIndicator,
        nb_of_trees_per_model: int,
        min_support: float,
        max_depth: int
) -> str:
    return (
        f"{dataset_name}{fold_i}_{target_attribute}_{str(classifier_indicator.value)}"
        f"_{nb_of_trees_per_model}trees"
        f"_{min_support}supp_{max_depth}depth"
    )


def get_single_target_tree_rules_abs_file_name(
        dataset_name: str, fold_i: int,
        target_attribute: str,
        classifier_indicator: SingleTargetClassifierIndicator,
        nb_of_trees_per_model: int,
        min_support: float,
        max_depth: int,
):
    rules_dir = get_single_target_tree_rule_dir()
    relative_file_name: str = get_single_target_tree_rules_relative_file_name_without_extension(
        dataset_name=dataset_name, fold_i=fold_i,
        target_attribute=target_attribute,
        classifier_indicator=classifier_indicator,
        nb_of_trees_per_model=nb_of_trees_per_model,
        min_support=min_support, max_depth=max_depth
    )
    tree_derived_rule_abs_file_name = os.path.join(rules_dir, f"{relative_file_name}.json.gz")
    return tree_derived_rule_abs_file_name


def get_single_target_tree_rules_gen_timing_info_abs_file_name(
        dataset_name: str, fold_i: int,
        target_attribute: str,
        classifier_indicator: SingleTargetClassifierIndicator,
        nb_of_trees_per_model: int,
        min_support: float,
        max_depth: int,
):
    rules_dir = get_single_target_tree_rule_dir()
    relative_file_name: str = get_single_target_tree_rules_relative_file_name_without_extension(
        dataset_name=dataset_name, fold_i=fold_i,
        target_attribute=target_attribute,
        classifier_indicator=classifier_indicator,
        nb_of_trees_per_model=nb_of_trees_per_model,
        min_support=min_support, max_depth=max_depth
    )
    tree_derived_rule_abs_file_name = os.path.join(rules_dir, f"{relative_file_name}_timings.json.gz")
    return tree_derived_rule_abs_file_name
```

- quality signals (cols 30–70): 345, 2598, 4.976812, 0.168116, 0.060571, 0.083867, 0.088527, 0.808387, 0.776937, 0.723355, 0.723355, 0.723355, 0.679091, 0, 0, 0.200924, 2598, 70, 103, 37.114286, 0.827071, 0, 0, 0.59322, 0, 0, 0.089684, 0.087375, 0, 0, 0, 0, 0, 1, 0.067797, false, 0, 0.050847, 0.016949, 0.186441, 0
- raw signals (cols 71–111): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective "0" · hits 6
**Row 18**

- hexsha `1daa69dd3bb44dba1f878107d4e4e2d32c7a2934` · size 43 · ext py · lang Python
- max_stars: repo Lolik-Bolik/The-production-cells-formation-problem, head `8c4f5790b92fbca7c9c5c8143c7e70fb3bb8b78b`, path `utils/__init__.py`, licenses ["MIT"], count 5, events 2020-06-01T18:58:14.000Z → 2020-06-17T04:52:49.000Z
- max_issues: same repo, head, path, and licenses; count null, events null → null
- max_forks: same repo, head, path, and licenses; count null, events null → null
- avg_line_length 43 · max_line_length 43 · alphanum_fraction 0.906977

content:

```python
from .dataloader import CellsProductionData
```

- quality signals (cols 30–70): 4, 43, 9.75, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.069767, 43, 1, 43, 43, 0.975, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
- raw signals (cols 71–111): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective "0" · hits 6
**Row 19**

- hexsha `d519270c80775a7bacb99ae959f7231648e44d40` · size 222 · ext py · lang Python
- max_stars: repo MuhammadIsmailShahzad/ckan-cloud-operator, head `35a4ca88c4908d81d1040a21fca8904e77c4cded`, path `ckan_cloud_operator/drivers/kubectl/driver.py`, licenses ["MIT"], count 14, events 2019-11-18T12:01:03.000Z → 2021-09-15T15:29:50.000Z
- max_issues: same repo, head, path, and licenses; count 52, events 2019-09-09T14:22:41.000Z → 2021-09-29T08:29:24.000Z
- max_forks: same repo, head, path, and licenses; count 8, events 2019-10-05T12:46:25.000Z → 2021-09-15T15:13:05.000Z
- avg_line_length 37 · max_line_length 102 · alphanum_fraction 0.765766

content:

```python
from ckan_cloud_operator import kubectl


def get(what, *args, required=True, namespace=None, get_cmd=None, **kwargs):
    return kubectl.get(what, *args, required=required, namespace=namespace, get_cmd=get_cmd, **kwargs)
```

- quality signals (cols 30–70): 32, 222, 5.15625, 0.53125, 0.109091, 0.133333, 0.230303, 0, 0, 0, 0, 0, 0, 0, 0, 0.108108, 222, 5, 103, 44.4, 0.833333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.333333, false, 0, 0.333333, 0.333333, 1, 0
- raw signals (cols 71–111): 0, 0, 0, null, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0
- effective "0" · hits 6
**Row 20**

- hexsha `d53653c57078f22dc6820daf96fa072146e66f13` · size 100 · ext py · lang Python
- max_stars: repo stiles69/bin, head `a327326ae22933e44c7ee2268f973dcedf7c8b3c`, path `zim/plugins/zimclip/tests/__init__.py`, licenses ["MIT"], count null, events null → null
- max_issues: same repo, head, path, and licenses; count null, events null → null
- max_forks: same repo, head, path, and licenses; count null, events null → null
- avg_line_length 11.111111 · max_line_length 23 · alphanum_fraction 0.7

content:

```python
# -*- coding: utf-8 -*-

import logging
import os
import sys
import unittest

# FIXME Do some tests
```

- quality signals (cols 30–70): 15, 100, 4.666667, 0.8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0125, 0.2, 100, 8, 24, 12.5, 0.8625, 0.41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.125, 0, 1, 0, true, 0, 1, 0, 1, 0
- raw signals (cols 71–111): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective "0" · hits 6
**Row 21**

- hexsha `d53df78809e1584483410583a6ebc437b5a2b0ef` · size 36 · ext py · lang Python
- max_stars: repo zebulon2/indra, head `7727ddcab52ad8012eb6592635bfa114e904bd48`, path `indra/assemblers/tsv/__init__.py`, licenses ["BSD-2-Clause"], count 136, events 2016-02-11T22:06:37.000Z → 2022-03-31T17:26:20.000Z
- max_issues: same repo, head, path, and licenses; count 748, events 2016-02-03T16:27:56.000Z → 2022-03-09T14:27:54.000Z
- max_forks: same repo, head, path, and licenses; count 56, events 2015-08-28T14:03:44.000Z → 2022-02-04T06:15:55.000Z
- avg_line_length 18 · max_line_length 35 · alphanum_fraction 0.861111

content:

```python
from .assembler import TsvAssembler
```

- quality signals (cols 30–70): 4, 36, 7.75, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.111111, 36, 1, 36, 36, 0.96875, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
- raw signals (cols 71–111): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective "0" · hits 6
**Row 22**

- hexsha `d57eb3183cea1c9fed2cc78a667014a3b96be463` · size 115 · ext py · lang Python
- max_stars: repo DevilXD/pytest-order, head `88685d802cb18bf04f72d0e8ec484d56bb3473d3`, path `example/order_scope_level/feature2/test_b.py`, licenses ["MIT"], count 41, events 2021-03-16T07:57:00.000Z → 2022-03-01T10:02:10.000Z
- max_issues: same repo, head, path, and licenses; count 39, events 2021-03-04T16:50:04.000Z → 2022-02-18T18:51:14.000Z
- max_forks: same repo, head, path, and licenses; count 9, events 2021-03-04T18:27:12.000Z → 2021-12-16T06:46:13.000Z
- avg_line_length 9.583333 · max_line_length 21 · alphanum_fraction 0.669565

content:

```python
import pytest


@pytest.mark.order(4)
def test_four():
    pass


@pytest.mark.order(3)
def test_three():
    pass
```

- quality signals (cols 30–70): 18, 115, 4.166667, 0.611111, 0.266667, 0.4, 0, 0, 0, 0, 0, 0, 0, 0.021505, 0.191304, 115, 11, 22, 10.454545, 0.784946, 0, 0, 0.285714, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.285714, true, 0.285714, 0.142857, 0, 0.428571, 0
- raw signals (cols 71–111): 0, 0, 0, null, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0
- effective "0" · hits 6
**Row 23**

- hexsha `d5892906d499f0f5a6f042d091d257df411e9d0c` · size 31503 · ext py · lang Python
- max_stars: repo IMULMUL/etl-parser, head `76b7c046866ce0469cd129ee3f7bb3799b34e271`, path `etl/parsers/etw/Microsoft_Windows_USB_USBHUB.py`, licenses ["Apache-2.0"], count 104, events 2020-03-04T14:31:31.000Z → 2022-03-28T02:59:36.000Z
- max_issues: same repo, head, path, and licenses; count 7, events 2020-04-20T09:18:39.000Z → 2022-03-19T17:06:19.000Z
- max_forks: same repo, head, path, and licenses; count 16, events 2020-03-05T18:55:59.000Z → 2022-03-01T10:19:28.000Z

content (the source export ends inside this record, so the file and the remaining columns are cut off):

```python
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-USB-USBHUB
GUID : 7426a56b-e2d5-4b30-bdef-b31815c1a74a
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=1, version=0)
class Microsoft_Windows_USB_USBHUB_1_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_USB_HubDescriptor" / Int64ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=2, version=0)
class Microsoft_Windows_USB_USBHUB_2_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / CString,
        "fid_USBHUB_Hub" / Int32sl
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=3, version=0)
class Microsoft_Windows_USB_USBHUB_3_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_USB_HubDescriptor" / Int64ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=10, version=0)
class Microsoft_Windows_USB_USBHUB_10_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=11, version=0)
class Microsoft_Windows_USB_USBHUB_11_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=12, version=0)
class Microsoft_Windows_USB_USBHUB_12_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=13, version=0)
class Microsoft_Windows_USB_USBHUB_13_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=14, version=0)
class Microsoft_Windows_USB_USBHUB_14_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=15, version=0)
class Microsoft_Windows_USB_USBHUB_15_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=16, version=0)
class Microsoft_Windows_USB_USBHUB_16_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=17, version=0)
class Microsoft_Windows_USB_USBHUB_17_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=18, version=0)
class Microsoft_Windows_USB_USBHUB_18_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=19, version=0)
class Microsoft_Windows_USB_USBHUB_19_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=20, version=0)
class Microsoft_Windows_USB_USBHUB_20_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=21, version=0)
class Microsoft_Windows_USB_USBHUB_21_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=22, version=0)
class Microsoft_Windows_USB_USBHUB_22_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=23, version=0)
class Microsoft_Windows_USB_USBHUB_23_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=24, version=0)
class Microsoft_Windows_USB_USBHUB_24_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=25, version=0)
class Microsoft_Windows_USB_USBHUB_25_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=26, version=0)
class Microsoft_Windows_USB_USBHUB_26_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=27, version=0)
class Microsoft_Windows_USB_USBHUB_27_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=28, version=0)
class Microsoft_Windows_USB_USBHUB_28_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=29, version=0)
class Microsoft_Windows_USB_USBHUB_29_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=30, version=0)
class Microsoft_Windows_USB_USBHUB_30_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=31, version=0)
class Microsoft_Windows_USB_USBHUB_31_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=32, version=0)
class Microsoft_Windows_USB_USBHUB_32_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=33, version=0)
class Microsoft_Windows_USB_USBHUB_33_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=34, version=0)
class Microsoft_Windows_USB_USBHUB_34_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=35, version=0)
class Microsoft_Windows_USB_USBHUB_35_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=36, version=0)
class Microsoft_Windows_USB_USBHUB_36_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=37, version=0)
class Microsoft_Windows_USB_USBHUB_37_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=39, version=0)
class Microsoft_Windows_USB_USBHUB_39_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=40, version=0)
class Microsoft_Windows_USB_USBHUB_40_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=41, version=0)
class Microsoft_Windows_USB_USBHUB_41_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=49, version=0)
class Microsoft_Windows_USB_USBHUB_49_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=50, version=0)
class Microsoft_Windows_USB_USBHUB_50_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=51, version=0)
class Microsoft_Windows_USB_USBHUB_51_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=59, version=0)
class Microsoft_Windows_USB_USBHUB_59_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=60, version=0)
class Microsoft_Windows_USB_USBHUB_60_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=61, version=0)
class Microsoft_Windows_USB_USBHUB_61_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=62, version=0)
class Microsoft_Windows_USB_USBHUB_62_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=63, version=0)
class Microsoft_Windows_USB_USBHUB_63_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=64, version=0)
class Microsoft_Windows_USB_USBHUB_64_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=70, version=0)
class Microsoft_Windows_USB_USBHUB_70_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=71, version=0)
class Microsoft_Windows_USB_USBHUB_71_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=80, version=0)
class Microsoft_Windows_USB_USBHUB_80_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_PortAttributes" / Int16ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=81, version=0)
class Microsoft_Windows_USB_USBHUB_81_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=82, version=0)
class Microsoft_Windows_USB_USBHUB_82_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=83, version=0)
class Microsoft_Windows_USB_USBHUB_83_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=84, version=0)
class Microsoft_Windows_USB_USBHUB_84_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Hub" / Int64sl,
        "fid_PortNumber" / Int32ul,
        "fid_Status" / Int32ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=100, version=0)
class Microsoft_Windows_USB_USBHUB_100_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Device" / Int64sl,
        "fid_USBHUB_Device_State" / Guid,
        "fid_DeviceDescriptor" / Int64ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=101, version=0)
class Microsoft_Windows_USB_USBHUB_101_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / CString,
        "fid_USBHUB_Device" / Int32sl
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=102, version=0)
class Microsoft_Windows_USB_USBHUB_102_0(Etw):
    pattern = Struct(
        "fid_USBHUB_HC" / Int8ul,
        "fid_USBHUB_Device" / Int64sl,
        "fid_USBHUB_Device_State" / Guid,
        "fid_DeviceDescriptor" / Int64ul
    )


@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=103, version=0)
class Microsoft_Windows_USB_USBHUB_103_0(Etw):
```
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_DeviceDescription" / WString
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=110, version=0)
class Microsoft_Windows_USB_USBHUB_110_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=111, version=0)
class Microsoft_Windows_USB_USBHUB_111_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=112, version=0)
class Microsoft_Windows_USB_USBHUB_112_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=113, version=0)
class Microsoft_Windows_USB_USBHUB_113_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=114, version=0)
class Microsoft_Windows_USB_USBHUB_114_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_DeviceDescription" / WString
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=119, version=0)
class Microsoft_Windows_USB_USBHUB_119_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=120, version=0)
class Microsoft_Windows_USB_USBHUB_120_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Device" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=121, version=0)
class Microsoft_Windows_USB_USBHUB_121_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Device" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=122, version=0)
class Microsoft_Windows_USB_USBHUB_122_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Device" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=123, version=0)
class Microsoft_Windows_USB_USBHUB_123_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Device" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=130, version=0)
class Microsoft_Windows_USB_USBHUB_130_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=139, version=0)
class Microsoft_Windows_USB_USBHUB_139_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=140, version=0)
class Microsoft_Windows_USB_USBHUB_140_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=149, version=0)
class Microsoft_Windows_USB_USBHUB_149_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=150, version=0)
class Microsoft_Windows_USB_USBHUB_150_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=151, version=0)
class Microsoft_Windows_USB_USBHUB_151_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=159, version=0)
class Microsoft_Windows_USB_USBHUB_159_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=160, version=0)
class Microsoft_Windows_USB_USBHUB_160_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=161, version=0)
class Microsoft_Windows_USB_USBHUB_161_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=169, version=0)
class Microsoft_Windows_USB_USBHUB_169_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=170, version=0)
class Microsoft_Windows_USB_USBHUB_170_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=171, version=0)
class Microsoft_Windows_USB_USBHUB_171_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=172, version=0)
class Microsoft_Windows_USB_USBHUB_172_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=173, version=0)
class Microsoft_Windows_USB_USBHUB_173_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=174, version=0)
class Microsoft_Windows_USB_USBHUB_174_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=175, version=0)
class Microsoft_Windows_USB_USBHUB_175_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=176, version=0)
class Microsoft_Windows_USB_USBHUB_176_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=177, version=0)
class Microsoft_Windows_USB_USBHUB_177_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=178, version=0)
class Microsoft_Windows_USB_USBHUB_178_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=179, version=0)
class Microsoft_Windows_USB_USBHUB_179_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=180, version=0)
class Microsoft_Windows_USB_USBHUB_180_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=181, version=0)
class Microsoft_Windows_USB_USBHUB_181_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=183, version=0)
class Microsoft_Windows_USB_USBHUB_183_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=184, version=0)
class Microsoft_Windows_USB_USBHUB_184_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=185, version=0)
class Microsoft_Windows_USB_USBHUB_185_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=189, version=0)
class Microsoft_Windows_USB_USBHUB_189_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=190, version=0)
class Microsoft_Windows_USB_USBHUB_190_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=199, version=0)
class Microsoft_Windows_USB_USBHUB_199_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=200, version=0)
class Microsoft_Windows_USB_USBHUB_200_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=209, version=0)
class Microsoft_Windows_USB_USBHUB_209_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PowerState" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=210, version=0)
class Microsoft_Windows_USB_USBHUB_210_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int32sl,
"fid_USBHUB_Hub" / Double,
"fid_PortNumber" / Int32ul,
"fid_Class" / Int32ul,
"fid_NtStatus" / Int32ul,
"fid_UsbdStatus" / Int32ul,
"fid_DebugText" / CString
)
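# Unlike the plain status events above, event 210 carries a richer error
# payload: the device class, separate NT and USBD status codes, and a
# free-form CString debug message.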
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=211, version=0)
class Microsoft_Windows_USB_USBHUB_211_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_PortStatusChange" / Int16ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=212, version=0)
class Microsoft_Windows_USB_USBHUB_212_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_TimerTag" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=213, version=0)
class Microsoft_Windows_USB_USBHUB_213_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_TimerTag" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=214, version=0)
class Microsoft_Windows_USB_USBHUB_214_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_TimerTag" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=220, version=0)
class Microsoft_Windows_USB_USBHUB_220_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=229, version=0)
class Microsoft_Windows_USB_USBHUB_229_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=230, version=0)
class Microsoft_Windows_USB_USBHUB_230_0(Etw):
pattern = Struct(
"fid_TimeElapsedBeforeLogStart" / Int64ul,
"fid_USBHUB_HC" / Int32ul,
"fid_USBHUB_Hub" / Int8ul,
"fid_PortNumber" / Int32ul,
"fid_Class" / Int32ul,
"fid_NtStatus" / Int32ul,
"fid_UsbdStatus" / Int32ul,
"fid_DebugText" / CString
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=231, version=0)
class Microsoft_Windows_USB_USBHUB_231_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=232, version=0)
class Microsoft_Windows_USB_USBHUB_232_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8sl,
"fid_USBHUB_Device" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=233, version=0)
class Microsoft_Windows_USB_USBHUB_233_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
@declare(guid=guid("7426a56b-e2d5-4b30-bdef-b31815c1a74a"), event_id=234, version=0)
class Microsoft_Windows_USB_USBHUB_234_0(Etw):
pattern = Struct(
"fid_USBHUB_HC" / Int8ul,
"fid_USBHUB_Hub" / Int64sl,
"fid_PortNumber" / Int32ul,
"fid_Status" / Int32ul
)
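# A minimal usage sketch (not part of the generated definitions above): each
# class's construct "pattern" can decode a raw event payload directly. The
# byte values below are fabricated for illustration only.
_sample = (bytes([0x01])                          # fid_USBHUB_HC (Int8ul)
           + (0x11223344).to_bytes(8, "little")   # fid_USBHUB_Hub (Int64sl)
           + (3).to_bytes(4, "little")            # fid_PortNumber (Int32ul)
           + (0).to_bytes(4, "little"))           # fid_Status (Int32ul)
_parsed = Microsoft_Windows_USB_USBHUB_234_0.pattern.parse(_sample)
assert _parsed.fid_PortNumber == 3 and _parsed.fid_Status == 0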
[quality-signal columns for the previous record omitted]

--- record d590b2e41df4cf47a10f9711b5f682f57ac29747: lib/__init__.py | co9olguy/Generating-and-designing-DNA @ 7dab87a1002790d37e929c5542f9761ae7d16416 | Python, 62 bytes | Unlicense | 32 stars, 3 issues, 11 forks ---
from .utils import *
from .models import *
from .dna import *
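# The star-imports re-export the public names of utils, models, and dna at
# the package top level, so callers can write `from lib import <name>`
# instead of reaching into the individual submodules.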
[quality-signal columns for the previous record omitted]

--- record d5af5a629e72cd72e0929e6e0460c5c839714274: bike/refactor/test_moveToModule.py | debiancn/bicyclerepair @ dd054e802d6d8ad80baeccee0396da68144f2a26 | Python, 6,763 bytes | ICU | 2 stars ---
#!/usr/bin/env python
import setpath
from bike.testutils import *
from bike.transformer.save import save
from moveToModule import *
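# These tests drive bicyclerepair's move-to-module refactorings end to end:
# each one writes a throwaway package to disk, runs moveClassToNewModule /
# moveFunctionToNewModule against it, saves, and compares the resulting file
# contents against the expected source, cleaning up in a finally block.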
class TestMoveClass(BRMTestCase):
def test_movesTheText(self):
src1=trimLines("""
def before(): pass
class TheClass:
pass
def after(): pass
""")
src1after=trimLines("""
def before(): pass
def after(): pass
""")
src2after=trimLines("""
class TheClass:
pass
""")
try:
createPackageStructure(src1, "")
moveClassToNewModule(pkgstructureFile1,2,
pkgstructureFile2)
save()
self.assertEqual(src1after,file(pkgstructureFile1).read())
self.assertEqual(src2after,file(pkgstructureFile2).read())
finally:
removePackageStructure()
class TestMoveFunction(BRMTestCase):
def test_importsNameReference(self):
src1=trimLines("""
a = 'hello'
def theFunction(self):
print a
""")
src2after=trimLines("""
from a.foo import a
def theFunction(self):
print a
""")
self.helper(src1, src2after)
def test_importsExternalReference(self):
src0=("""
a = 'hello'
""")
src1=trimLines("""
from top import a
def theFunction(self):
print a
""")
src2after=trimLines("""
from top import a
def theFunction(self):
print a
""")
try:
createPackageStructure(src1, "", src0)
moveFunctionToNewModule(pkgstructureFile1,2,
pkgstructureFile2)
save()
self.assertEqual(src2after,file(pkgstructureFile2).read())
finally:
removePackageStructure()
def test_doesntImportRefCreatedInFunction(self):
src1=trimLines("""
def theFunction(self):
a = 'hello'
print a
""")
src2after=trimLines("""
def theFunction(self):
a = 'hello'
print a
""")
self.helper(src1, src2after)
def test_addsImportStatementToOriginalFileIfRequired(self):
src1=trimLines("""
def theFunction(self):
pass
b = theFunction()
""")
src1after=trimLines("""
from a.b.bah import theFunction
b = theFunction()
""")
try:
createPackageStructure(src1,"")
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile2)
save()
self.assertEqual(src1after,file(pkgstructureFile1).read())
finally:
removePackageStructure()
def test_updatesFromImportStatementsInOtherModules(self):
src0=trimLines("""
from a.foo import theFunction
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
""")
src0after=trimLines("""
from a.b.bah import theFunction
print theFunction()
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile2)
save()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def test_updatesFromImportMultiplesInOtherModules(self):
src0=trimLines("""
from a.foo import something,theFunction,somethingelse #comment
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
something = ''
somethingelse = 0
""")
src0after=trimLines("""
from a.foo import something,somethingelse #comment
from a.b.bah import theFunction
print theFunction()
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile2)
save()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def test_updatesFromImportMultiplesInTargetModule(self):
src0=trimLines("""
from a.foo import something,theFunction,somethingelse #comment
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
something = ''
somethingelse = 0
""")
src0after=trimLines("""
from a.foo import something,somethingelse #comment
print theFunction()
def theFunction(self):
pass
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile0)
save()
#print file(pkgstructureFile0).read()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def test_updatesFromImportInTargetModule(self):
src0=trimLines("""
from a.foo import theFunction
print theFunction()
""")
src1=trimLines("""
def theFunction(self):
pass
""")
src0after=trimLines("""
print theFunction()
def theFunction(self):
pass
""")
try:
createPackageStructure(src1,"",src0)
moveFunctionToNewModule(pkgstructureFile1,1,
pkgstructureFile0)
save()
self.assertEqual(src0after,file(pkgstructureFile0).read())
finally:
removePackageStructure()
def helper(self, src1, src2after):
try:
createPackageStructure(src1, "")
moveFunctionToNewModule(pkgstructureFile1,2,
pkgstructureFile2)
save()
self.assertEqual(src2after,file(pkgstructureFile2).read())
finally:
removePackageStructure()
if __name__ == "__main__":
unittest.main()
[quality-signal columns for the previous record omitted]

--- record 6389dc63d6c399ed10f73f80566508686888935c: pybomberman/__init__.py | pybomberman/pybomberman @ 8c7582ec52bf0dd1d77a3e98f5867ffa97233653 | Python, 82 bytes | MIT | 2 stars ---
from .map import Map
print("Soon... https://github.com/pybomberman/pybomberman")
[quality-signal columns for the previous record omitted]

--- record 63adc83e9b99bdc20e56c3462b56c6b2c4cdbcd3: FinBot/intent/Loki_Exchange.py | Lanlanluuu/LokiHub @ aae3efb566d2383e78eaa8dc1e8b3f1bb097f2a6 | Python, 4,523 bytes | MIT | 17 stars, 8 issues, 43 forks ---
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Loki module for Exchange
Input:
inputSTR str,
utterance str,
args str[],
resultDICT dict
Output:
resultDICT dict
"""
DEBUG_Exchange = True
userDefinedDICT = {"歐元":"EUR",
"美金":"USD",
"日圓":"JPY",
"台幣":"TWD",
"臺幣":"TWD",
"英鎊":"GBP",
"法郎":"CHF",
"澳幣":"AUD",
"港幣":"HKD",
"泰銖":"THB"}
# Print the argument list for utterances that matched a sentence pattern; for debugging / development use.
def debugInfo(inputSTR, utterance):
if DEBUG_Exchange:
print("[Exchange] {} ===> {}".format(inputSTR, utterance))
def getResult(inputSTR, utterance, args, resultDICT):
debugInfo(inputSTR, utterance)
if utterance == "[100元][美金]可以兌換[台幣]多少":
resultDICT["source"] = args[1]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[0]
pass
if utterance == "[100元][美金]可以兌換多少[台幣]":
resultDICT["source"] = args[1]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[0]
pass
if utterance == "[100元][美金]要[台幣]多少":
resultDICT["source"] = args[1]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[0]
pass
if utterance == "[100元][美金]要多少[台幣]":
resultDICT["source"] = args[1]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[0]
pass
if utterance == "[100台幣]換[美金]":
        # If any key x of userDefinedDICT appears inside args[0], pull out the first such key (i.e., the currency name).
resultDICT["source"] = [x for x in userDefinedDICT if x in args[0]][0]
resultDICT["target"] = args[1]
resultDICT["amount"] = args[0]
pass
if utterance == "[100美金]能換多少[台幣]":
resultDICT["source"] = [x for x in userDefinedDICT if x in args[0]][0]
resultDICT["target"] = args[1]
resultDICT["amount"] = args[0]
pass
if utterance == "[100美金]要[台幣]多少":
resultDICT["source"] = [x for x in userDefinedDICT if x in args[0]][0]
resultDICT["target"] = args[1]
resultDICT["amount"] = args[0]
pass
if utterance == "[100美金]要多少[台幣]":
resultDICT["source"] = [x for x in userDefinedDICT if x in args[0]][0]
resultDICT["target"] = args[1]
resultDICT["amount"] = args[0]
pass
if utterance == "[今天][美金]兌換[台幣]是多少":
resultDICT["source"] = args[1]
resultDICT["target"] = args[2]
resultDICT["amount"] = None
pass
if utterance == "[美金][100]要[台幣]多少":
resultDICT["source"] = args[0]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[1]
pass
if utterance == "[美金][100]要多少[台幣]":
resultDICT["source"] = args[0]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[1]
pass
if utterance == "[美金][100元]可以兌換[台幣]多少":
resultDICT["source"] = args[0]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[1]
pass
if utterance == "[美金][100元]可以兌換多少[台幣]":
resultDICT["source"] = args[0]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[1]
pass
if utterance == "[美金][100元]要[台幣]多少":
resultDICT["source"] = args[0]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[1]
pass
if utterance == "[美金][100元]要多少[台幣]":
print("IN")
resultDICT["source"] = args[0]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[1]
pass
if utterance == "我想要[100元][美金]":
resultDICT["source"] = args[1]
resultDICT["target"] = None
resultDICT["amount"] = args[0]
pass
if utterance == "我想要[美金][100元]":
resultDICT["source"] = args[0]
resultDICT["target"] = None
resultDICT["amount"] = args[1]
pass
if utterance == "我想買[100元][美金]":
resultDICT["source"] = args[1]
resultDICT["target"] = None
resultDICT["amount"] = args[0]
pass
if utterance == "我想買[美金][100元]":
resultDICT["source"] = args[0]
resultDICT["target"] = None
resultDICT["amount"] = args[1]
pass
if utterance == "[美金][100元]是多少[法郎]":
resultDICT["source"] = args[0]
resultDICT["target"] = args[2]
resultDICT["amount"] = args[1]
pass
return resultDICT
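# A minimal invocation sketch (not part of the original module): Loki calls
# getResult with the raw input, the matched utterance pattern, and the
# captured arguments. The values below are hypothetical.
if __name__ == "__main__":
    out = getResult("100元美金可以兌換多少台幣",
                    "[100元][美金]可以兌換多少[台幣]",
                    ["100元", "美金", "台幣"], {})
    print(out)  # {'source': '美金', 'target': '台幣', 'amount': '100元'}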
[quality-signal columns for the previous record omitted]

--- record 63be5f6fc2edffc16d8c259349723231c31bc671: lyrics.py | JamesK2754/COVIDBot @ b4ffaa21873baa1f0c5dfd5b4d5ebb30bfd8d1a4 | Python, 613 bytes | MIT ---
import lyricsgenius
geniustoken = "Akf1AHXpbqaKHSQ06hesk8q1urZkHWJ334bzLr1SwZ1BBPSMGUm3NcbcbDR8ye7Z"
genius = lyricsgenius.Genius(geniustoken)
songname = input("")
def lysearch(songname):
    # Accepts "title" or "title/artist" and returns the lyrics text.
    genius = lyricsgenius.Genius(geniustoken)
    parts = songname.split("/")
    if len(parts) == 1:
        song = genius.search_song(parts[0])
    else:
        song = genius.search_song(parts[0], parts[1])
    if song is None:
        return None  # Genius found no match
    return song.lyrics
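# Example (hypothetical input): lysearch("Bohemian Rhapsody/Queen") searches
# Genius for that title/artist pair and returns the lyrics string, or None
# when nothing matches.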
[quality-signal columns for the previous record omitted]

--- record 63c1f522219bcc2dba5b4f17eb780f21296ad3d6: django_rest_auth_embedded/tests/__init__.py | Volkova-Natalia/django_rest_auth_embedded @ 43fe1d23f59332a7794365348989599cde44af6e | Python, 90 bytes | MIT | 1 issue ---
from .models import *
from .urls import *
from .views import *
from .integration import *
[quality-signal columns for the previous record omitted]

--- record 63c2e90768ca94858d6102fd8adcdc5f1544bdda: pycwr/__init__.py | 1271756664/study @ 8013dd6c597618949c5fcbf86e38502525a8136d | Python, 137 bytes | MIT | 144 stars, 32 issues, 57 forks ---
from . import configure, core, draw, io, interp, retrieve, qc
__all__ = ["configure", "core", "draw", "io", "interp", "qc", "retrieve"]
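# Defining __all__ pins exactly which submodules `from pycwr import *`
# re-exports; the explicit `from . import ...` above also makes each
# submodule reachable as an attribute of the package.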
[quality-signal columns for the previous record omitted]

--- record 63cd13cafbe9b72881384584902ce2c4c485f091: ubertool/terrplant/tests/test_terrplant_unittest.py | qed-uber/ubertool @ 472a143e110f634afdfe03d503e5f442b1e57b86 | Python, 43,992 bytes | Unlicense | 2 stars, 21 issues ---
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pandas.util.testing as pdt
import sys
from tabulate import tabulate
import unittest
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..terrplant_exe import Terrplant
test = {}
class TestTerrplant(unittest.TestCase):
"""
Unit tests for terrplant.
"""
print("terrplant unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for terrplant unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open terrplant qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for terrplant unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_terrplant_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty terrplant object
terrplant_empty = Terrplant(df_empty, df_empty)
return terrplant_empty
# each of these functions are queued by "run_methods" and have outputs defined as properties in the terrplant qaqc csv
def test_terrplant_rundry(self):
"""
unittest for function terrplant.rundry
"""
#(self.application_rate/self.incorporation_depth) * self.runoff_fraction
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [0.5, 4.41, 6.048]
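        # Sanity check of the expected values against the commented formula:
        # run_dry = (application_rate / incorporation_depth) * runoff_fraction,
        # e.g. (10/2)*0.1 = 0.5, (21/1)*0.21 = 4.41, (56/4)*0.432 = 6.048.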
try:
terrplant_empty.application_rate = pd.Series([10, 21, 56], dtype='int')
terrplant_empty.incorporation_depth = pd.Series([2, 1, 4], dtype='int')
terrplant_empty.runoff_fraction = pd.Series([0.1, 0.21, 0.432 ], dtype='float')
result = terrplant_empty.run_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
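    # Every test below follows the same shape: populate the inputs on an
    # empty Terrplant object, assert the computed Series against
    # expected_results, and (pass or fail) print a tabulate table of actual
    # vs. expected values in the finally block.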
def test_terrplant_runsemi(self):
"""
unittest for function terrplant.runsemi
"""
#self.out_runsemi = (self.application_rate/self.incorporation_depth) * self.runoff_fraction * 10
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [5.0, 2.5, 19.0]
try:
terrplant_empty.application_rate = pd.Series([10, 20, 30], dtype='int')
terrplant_empty.incorporation_depth = pd.Series([2, 4, 3], dtype='int')
terrplant_empty.runoff_fraction = pd.Series([0.1, 0.05, 0.19], dtype='float')
result = terrplant_empty.run_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_spray(self):
"""
unittest for function terrplant.spray
"""
#self.out_spray = self.application_rate * self.drift_fraction
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [5.0, 5.36, 19.05]
try:
terrplant_empty.application_rate = pd.Series([10, 20, 30], dtype='int')
terrplant_empty.drift_fraction = pd.Series([0.5, .268, 0.635], dtype='float')
result = terrplant_empty.spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_totaldry(self):
"""
unittest for function terrplant.totaldry
"""
#self.out_totaldry = self.out_rundry + self.out_spray
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results =[5.5, 15.65, 35.32]
try:
terrplant_empty.out_run_dry = pd.Series([0.5, 3.65, 12.32], dtype='float')
terrplant_empty.out_spray = pd.Series([5.0, 12.0, 23.0], dtype='float')
result = terrplant_empty.total_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_totalsemi(self):
"""
unittest for function terrplant.totalsemi
"""
#self.out_totalsemi = self.out_runsemi + self.out_spray
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [5.034, 46.52, 71.669, ]
try:
terrplant_empty.out_run_semi = pd.Series([5.0, 12.32, 59.439], dtype='float')
terrplant_empty.out_spray = pd.Series([0.034, 34.2, 12.23], dtype='float')
result = terrplant_empty.total_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nms_rq_dry(self):
"""
unittest for function terrplant.nms_rq_dry
"""
#self.out_nms_rq_dry = self.out_totaldry/self.ec25_nonlisted_seedling_emergence_monocot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [110.0, 1.45211, 0.0669796]
try:
terrplant_empty.out_total_dry = pd.Series([5.5, 17.89, 23.12345], dtype='float')
terrplant_empty.ec25_nonlisted_seedling_emergence_monocot = pd.Series([0.05, 12.32, 345.231], dtype='float')
result = terrplant_empty.nms_rq_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nms_loc_dry(self):
"""
unittest for function terrplant.nms_loc_dry
"""
# if self.out_nms_rq_dry >= 1.0:
# self.out_nms_loc_dry = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via runoff to a dry area indicates a potential risk.')
# else:
# self.out_nms_loc_dry = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via runoff to a dry area indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed monocot seedlings exposed to the "
"pesticide via runoff to dry areas indicates a potential risk.",
"The risk quotient for non-listed monocot seedlings exposed to "
"the pesticide via runoff to dry areas indicates that potential "
"risk is minimal.", "The risk quotient for non-listed monocot "
"seedlings exposed to the pesticide via runoff to dry areas indicates "
"a potential risk."])
try:
terrplant_empty.out_nms_rq_dry = pd.Series([1.0, 0.5, 3.5], dtype='float')
result = terrplant_empty.loc_nms_dry()
pdt.assert_series_equal(result,expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nms_rq_semi(self):
"""
unittest for function terrplant.nms_rq_semi
"""
#self.out_nms_rq_semi = self.out_totalsemi/self.ec25_nonlisted_seedling_emergence_monocot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [200.0, 4.197279, 16.18354]
try:
terrplant_empty.out_total_semi = pd.Series([10., 1.234, 23.984], dtype='float')
terrplant_empty.ec25_nonlisted_seedling_emergence_monocot = pd.Series([0.05, 0.294, 1.482], dtype='float')
result = terrplant_empty.nms_rq_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_out_nms_loc_semi(self):
"""
unittest for function terrplant.nms_loc_semi
"""
# if self.out_nms_rq_semi >= 1.0:
# self.out_nms_loc_semi = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via runoff to a semi-aquatic area indicates a potential risk.')
# else:
# self.out_nms_loc_semi = ('The risk quotient for non-listed monocot seedlings exposed to the'\
# ' pesticide via runoff to a semi-aquatic area indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed monocot seedlings exposed to the "
"pesticide via runoff to semi-aquatic areas indicates a potential "
"risk.", "The risk quotient for non-listed monocot seedlings exposed "
"to the pesticide via runoff to semi-aquatic areas indicates that "
"potential risk is minimal.", "The risk quotient for non-listed monocot "
"seedlings exposed to the pesticide via runoff to semi-aquatic areas "
"indicates a potential risk."])
try:
terrplant_empty.out_nms_rq_semi = pd.Series([1.0, 0.45, 2.7], dtype='float')
result = terrplant_empty.loc_nms_semi()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nms_rq_spray(self):
"""
unittest for function terrplant.nms_rq_spray
"""
#self.out_nms_rq_spray = self.out_spray/out__min_nms_spray
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [215.5062, 1.896628, 16.60117]
try:
terrplant_empty.out_spray = pd.Series([5.045, 2.43565, 9.04332], dtype='float')
terrplant_empty.out_min_nms_spray = pd.Series([0.02341, 1.2842, 0.54474], dtype='float')
result = terrplant_empty.nms_rq_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nms_loc_spray(self):
"""
unittest for function terrplant.nms_loc_spray
"""
# if self.out_nms_rq_spray >= 1.0:
# self.out_nms_loc_spray = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via spray drift indicates a potential risk.')
# else:
# self.out_nms_loc_spray = ('The risk quotient for non-listed monocot seedlings exposed to the'\
# ' pesticide via spray drift indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed monocot seedlings exposed to the pesticide via "
"spray drift indicates a potential risk.", "The risk quotient for non-listed monocot "
"seedlings exposed to the pesticide via spray drift indicates that potential risk "
"is minimal.", "The risk quotient for non-listed monocot seedlings exposed to the "
"pesticide via spray drift indicates a potential risk."])
try:
terrplant_empty.out_nms_rq_spray = pd.Series([2.2, 0.0056, 1.0], dtype='float')
result = terrplant_empty.loc_nms_spray()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_rq_dry(self):
"""
unittest for function terrplant.lms_rq_dry
"""
#self.out_lms_rq_dry = self.out_totaldry/self.ec25_nonlisted_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [550.0, 3.40279, 234.0831]
try:
terrplant_empty.out_total_dry = pd.Series([5.5, 1.094, 19.5436], dtype='float')
terrplant_empty.noaec_listed_seedling_emergence_monocot = pd.Series([0.01, 0.3215, 0.08349], dtype='float')
result = terrplant_empty.lms_rq_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_loc_dry(self):
"""
unittest for function terrplant.lms_loc_dry
"""
# if self.out_lms_rq_dry >= 1.0:
# self.out_lms_loc_dry = ('The risk quotient for listed monocot seedlings exposed to'\
# ' the pesticide via runoff to a dry area indicates a potential risk.')
# else:
# self.out_lms_loc_dry = ('The risk quotient for listed monocot seedlings exposed to the'\
# ' pesticide via runoff to a dry area indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed monocot seedlings exposed to the pesticide "
"via runoff to dry areas indicates a potential risk.", "The risk quotient "
"for listed monocot seedlings exposed to the pesticide via runoff to dry "
"areas indicates that potential risk is minimal.", "The risk quotient for "
"listed monocot seedlings exposed to the pesticide via runoff to dry areas "
"indicates a potential risk."])
try:
terrplant_empty.out_lms_rq_dry = pd.Series([1.6, 0.045, 1.0], dtype='float')
result = terrplant_empty.loc_lms_dry()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_rq_semi(self):
"""
unittest for function terrplant.lms_rq_semi
"""
#self.out_lms_rq_semi = self.out_totalsemi/self.ec25_nonlisted_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [1000.0, 0.0217295, 72.19618]
try:
terrplant_empty.out_total_semi = pd.Series([10., 0.099, 24.5467], dtype='float')
terrplant_empty.noaec_listed_seedling_emergence_monocot = pd.Series([0.01, 4.556, 0.34], dtype='float')
result = terrplant_empty.lms_rq_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_loc_semi(self):
"""
unittest for function terrplant.lms_loc_semi
"""
# if self.out_lms_rq_semi >= 1.0:
# self.out_lms_loc_semi = ('The risk quotient for listed monocot seedlings exposed to'\
# ' the pesticide via runoff to a semi-aquatic area indicates a potential risk.')
# else:
# self.out_lms_loc_semi = ('The risk quotient for listed monocot seedlings exposed to the'\
# ' pesticide via runoff to a semi-aquatic area indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed monocot seedlings exposed to the pesticide via "
"runoff to semi-aquatic areas indicates a potential risk.", "The risk quotient "
"for listed monocot seedlings exposed to the pesticide via runoff to "
"semi-aquatic areas indicates that potential risk is minimal.", "The risk "
"quotient for listed monocot seedlings exposed to the pesticide via runoff "
"to semi-aquatic areas indicates a potential risk."])
try:
terrplant_empty.out_lms_rq_semi = pd.Series([1.0, 0.9, 6.456], dtype= 'float')
result = terrplant_empty.loc_lms_semi()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_rq_spray(self):
"""
unittest for function terrplant.lms_rq_spray
"""
#self.out_lms_rq_spray = self.out_spray/self.ec25_nonlisted_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [500.0, 3.754362, 0.04772294]
try:
terrplant_empty.out_spray = pd.Series([5., 9.1231, 0.09231], dtype='float')
terrplant_empty.out_min_lms_spray = pd.Series([0.01, 2.43, 1.93429], dtype='float')
result = terrplant_empty.lms_rq_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lms_loc_spray(self):
"""
unittest for function terrplant.lms_loc_spray
"""
# if self.out_lms_rq_spray >= 1.0:
# self.out_lms_loc_spray = ('The risk quotient for listed monocot seedlings exposed to'\
# ' the pesticide via spray drift indicates a potential risk.')
# else:
# self.out_lms_loc_spray = ('The risk quotient for listed monocot seedlings exposed to the'\
# ' pesticide via spray drift indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed monocot seedlings exposed "
"to the pesticide via spray drift indicates a potential "
"risk.", "The risk quotient for listed monocot seedlings "
"exposed to the pesticide via spray drift indicates that "
"potential risk is minimal.", "The risk quotient for "
"listed monocot seedlings exposed to the pesticide via "
"spray drift indicates a potential risk."])
try:
terrplant_empty.out_lms_rq_spray = pd.Series([1.1, 0.99, 3.129], dtype= 'float')
result = terrplant_empty.loc_lms_spray()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_rq_dry(self):
"""
unittest for function terrplant.nds_rq_dry
"""
#self.out_nds_rq_dry = self.out_totaldry/self.noaec_listed_seedling_emergence_monocot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [275., 1.012424, 9.062258]
try:
terrplant_empty.out_total_dry = pd.Series([5.5, 1.0023, 19.32436], dtype='float')
terrplant_empty.ec25_nonlisted_seedling_emergence_dicot = pd.Series([0.02, 0.99, 2.1324], dtype='float')
result = terrplant_empty.nds_rq_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_loc_dry(self):
"""
unittest for function terrplant.nds_loc_dry
"""
# if self.out_nds_rq_dry >= 1.0:
# self.out_nds_loc_dry = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via runoff to dry areas indicates a potential risk.')
# else:
# self.out_nds_loc_dry = ('The risk quotient for non-listed monocot seedlings exposed to the'\
# ' pesticide via runoff to dry areas indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed dicot seedlings exposed to the "
"pesticide via runoff to dry areas indicates a potential "
"risk.", "The risk quotient for non-listed dicot seedlings "
"exposed to the pesticide via runoff to dry areas indicates "
"that potential risk is minimal.", "The risk quotient for "
"non-listed dicot seedlings exposed to the pesticide via runoff "
"to dry areas indicates a potential risk."])
try:
terrplant_empty.out_nds_rq_dry = pd.Series([2.7, 0.923, 1.0], dtype='float')
result = terrplant_empty.loc_nds_dry()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_rq_semi(self):
"""
unittest for function terrplant.nds_rq_semi
"""
#self.out_nds_rq_semi = self.out_totalsemi/self.noaec_listed_seedling_emergence_monocot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [500., 3.464141, 0.999986]
try:
terrplant_empty.out_total_semi = pd.Series([10., 3.4295, 12.82323], dtype='float')
terrplant_empty.ec25_nonlisted_seedling_emergence_dicot = pd.Series([0.02, 0.99, 12.8234], dtype='float')
result = terrplant_empty.nds_rq_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_loc_semi(self):
"""
unittest for function terrplant.nds_loc_semi
"""
# if self.out_nds_rq_semi >= 1.0:
# self.out_nds_loc_semi = ('The risk quotient for non-listed monocot seedlings exposed to'\
# ' the pesticide via runoff to semi-aquatic areas indicates a potential risk.')
# else:
# self.out_nds_loc_semi = ('The risk quotient for non-listed monocot seedlings exposed to the'\
# ' pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed dicot seedlings exposed to the "
"pesticide via runoff to semi-aquatic areas indicates a potential "
"risk.", "The risk quotient for non-listed dicot seedlings exposed "
"to the pesticide via runoff to semi-aquatic areas indicates that "
"potential risk is minimal.", "The risk quotient for non-listed "
"dicot seedlings exposed to the pesticide via runoff to semi-aquatic "
"areas indicates a potential risk."])
try:
terrplant_empty.out_nds_rq_semi = pd.Series([1.7, 0.001, 2.3134], dtype='float')
result = terrplant_empty.loc_nds_semi()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_rq_spray(self):
"""
unittest for function terrplant.nds_rq_spray
"""
        #self.out_nds_rq_spray = self.out_spray/self.out_min_nds_spray
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [235.5158, 0.2584818, 1.994142]
        result = pd.Series(dtype='float')  # pre-assign so the finally block cannot raise NameError
        try:
terrplant_empty.out_spray = pd.Series([5., 0.9912, 23.9321], dtype='float')
terrplant_empty.out_min_nds_spray = pd.Series([0.02123, 3.8347, 12.0012], dtype='float')
result = terrplant_empty.nds_rq_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_nds_loc_spray(self):
"""
        unittest for function terrplant.loc_nds_spray
        """
        # if self.out_nds_rq_spray >= 1.0:
        #     self.out_nds_loc_spray = ('The risk quotient for non-listed dicot seedlings exposed to'\
        #         ' the pesticide via spray drift indicates a potential risk.')
        # else:
        #     self.out_nds_loc_spray = ('The risk quotient for non-listed dicot seedlings exposed to the'\
        #         ' pesticide via spray drift indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for non-listed dicot seedlings exposed to the "
"pesticide via spray drift indicates a potential risk.", "The "
"risk quotient for non-listed dicot seedlings exposed to the "
"pesticide via spray drift indicates that potential risk is "
"minimal.", "The risk quotient for non-listed dicot seedlings "
"exposed to the pesticide via spray drift indicates a potential risk."])
        result = pd.Series(dtype='object')  # pre-assign so the finally block cannot raise NameError
        try:
terrplant_empty.out_nds_rq_spray = pd.Series([1.2, 0.439, 3.9921], dtype='float')
result = terrplant_empty.loc_nds_spray()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_rq_dry(self):
"""
unittest for function terrplant.lds_rq_dry
"""
        #self.out_lds_rq_dry = self.out_total_dry/self.noaec_listed_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [55., 1.001862, 6.043703]
        result = pd.Series(dtype='float')  # pre-assign so the finally block cannot raise NameError
        try:
terrplant_empty.out_total_dry = pd.Series([5.5, 0.991843, 12.7643], dtype='float')
terrplant_empty.noaec_listed_seedling_emergence_dicot = pd.Series([0.1, .99, 2.112], dtype='float')
result = terrplant_empty.lds_rq_dry()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_loc_dry(self):
"""
        unittest for function terrplant.loc_lds_dry
        """
        # if self.out_lds_rq_dry >= 1.0:
        #     self.out_lds_loc_dry = ('The risk quotient for listed dicot seedlings exposed to'\
        #         ' the pesticide via runoff to dry areas indicates a potential risk.')
        # else:
        #     self.out_lds_loc_dry = ('The risk quotient for listed dicot seedlings exposed to the'\
        #         ' pesticide via runoff to dry areas indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed dicot seedlings exposed to the "
"pesticide via runoff to dry areas indicates a potential "
"risk.", "The risk quotient for listed dicot seedlings exposed "
"to the pesticide via runoff to dry areas indicates that "
"potential risk is minimal.", "The risk quotient for listed "
"dicot seedlings exposed to the pesticide via runoff to dry "
"areas indicates a potential risk."])
        result = pd.Series(dtype='object')  # pre-assign so the finally block cannot raise NameError
        try:
            terrplant_empty.out_lds_rq_dry = pd.Series([1.5, 0.00856, 4.2893], dtype='float')
result = terrplant_empty.loc_lds_dry()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_rq_semi(self):
"""
unittest for function terrplant.lds_rq_semi
"""
        #self.out_lds_rq_semi = self.out_total_semi/self.noaec_listed_seedling_emergence_dicot
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [100., 2502.0289, 16.08304]
        result = pd.Series(dtype='float')  # pre-assign so the finally block cannot raise NameError
        try:
terrplant_empty.out_total_semi = pd.Series([10., 0.8632, 34.2321], dtype='float')
terrplant_empty.noaec_listed_seedling_emergence_dicot = pd.Series([0.1, 0.000345, 2.12846], dtype='float')
result = terrplant_empty.lds_rq_semi()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_loc_semi(self):
"""
        unittest for function terrplant.loc_lds_semi
        """
        # if self.out_lds_rq_semi >= 1.0:
        #     self.out_lds_loc_semi = ('The risk quotient for listed dicot seedlings exposed to'\
        #         ' the pesticide via runoff to semi-aquatic areas indicates a potential risk.')
        # else:
        #     self.out_lds_loc_semi = ('The risk quotient for listed dicot seedlings exposed to the'\
        #         ' pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed dicot seedlings exposed to the "
"pesticide via runoff to semi-aquatic areas indicates a potential "
"risk.", "The risk quotient for listed dicot seedlings exposed to "
"the pesticide via runoff to semi-aquatic areas indicates that "
"potential risk is minimal.", "The risk quotient for listed dicot "
"seedlings exposed to the pesticide via runoff to semi-aquatic "
"areas indicates a potential risk."])
        result = pd.Series(dtype='object')  # pre-assign so the finally block cannot raise NameError
        try:
            terrplant_empty.out_lds_rq_semi = pd.Series([4.5, 0.0028, 1.0], dtype='float')
result = terrplant_empty.loc_lds_semi()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_rq_spray(self):
"""
unittest for function terrplant.lds_rq_spray
"""
        #self.out_lds_rq_spray = self.out_spray/self.out_min_lds_spray
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [250., 0.7105719, 1.28799]
        result = pd.Series(dtype='float')  # pre-assign so the finally block cannot raise NameError
        try:
terrplant_empty.out_spray = pd.Series([5.0, 0.94435, 12.7283], dtype='float')
terrplant_empty.out_min_lds_spray = pd.Series([0.02, 1.329, 9.8823], dtype='float')
result = terrplant_empty.lds_rq_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_lds_loc_spray(self):
"""
        unittest for function terrplant.loc_lds_spray
        """
        # if self.out_lds_rq_spray >= 1.0:
        #     self.out_lds_loc_spray = ('The risk quotient for listed dicot seedlings exposed to'\
        #         ' the pesticide via spray drift indicates a potential risk.')
        # else:
        #     self.out_lds_loc_spray = ('The risk quotient for listed dicot seedlings exposed to the'\
        #         ' pesticide via spray drift indicates that potential risk is minimal.')
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = pd.Series(["The risk quotient for listed dicot seedlings exposed to the "
"pesticide via spray drift indicates a potential risk.", "The "
"risk quotient for listed dicot seedlings exposed to the "
"pesticide via spray drift indicates that potential risk is "
"minimal.", "The risk quotient for listed dicot seedlings "
"exposed to the pesticide via spray drift indicates a potential "
"risk."])
        result = pd.Series(dtype='object')  # pre-assign so the finally block cannot raise NameError
        try:
terrplant_empty.out_lds_rq_spray = pd.Series([1.8, 0.956, 3.25], dtype='float')
result = terrplant_empty.loc_lds_spray()
pdt.assert_series_equal(result, expected_results, True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_min_nms_spray(self):
"""
unittest for function terrplant.min_nms_spray
"""
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [0.0501, 0.9999, 1.9450]
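        # Hedged worked check: the element-wise minimum of the two inputs
        # below gives min(0.0501, 0.0801) = 0.0501, min(1.0004, 0.9999) = 0.9999
        # and min(12.943, 1.9450) = 1.9450, matching expected_results.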
        result = pd.Series(dtype='float')  # pre-assign so the finally block cannot raise NameError
        try:
terrplant_empty.ec25_nonlisted_seedling_emergence_monocot = pd.Series([0.0501, 1.0004, 12.943], dtype='float')
terrplant_empty.ec25_nonlisted_vegetative_vigor_monocot = pd.Series([0.0801, 0.9999, 1.9450], dtype='float')
result = terrplant_empty.min_nms_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_min_lms_spray(self):
"""
unittest for function terrplant.min_lms_spray
"""
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [0.0205, 1.9234, 0.000453]
        result = pd.Series(dtype='float')  # pre-assign so the finally block cannot raise NameError
        try:
terrplant_empty.noaec_listed_vegetative_vigor_monocot = pd.Series([0.0211, 1.9234, 0.001112], dtype='float')
terrplant_empty.noaec_listed_seedling_emergence_monocot = pd.Series([0.0205, 3.231, 0.000453], dtype='float')
result = terrplant_empty.min_lms_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_min_nds_spray(self):
"""
unittest for function terrplant.min_nds_spray
"""
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [0.0325, 0.00342, 1.3456]
        result = pd.Series(dtype='float')  # pre-assign so the finally block cannot raise NameError
        try:
terrplant_empty.ec25_nonlisted_vegetative_vigor_dicot = pd.Series([0.0325, 3.432, 1.3456], dtype='float')
terrplant_empty.ec25_nonlisted_seedling_emergence_dicot = pd.Series([0.5022, 0.00342, 1.34567], dtype='float')
result = terrplant_empty.min_nds_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_terrplant_min_lds_spray(self):
"""
unittest for function terrplant.min_lds_spray
"""
# create empty pandas dataframes to create empty object for this unittest
terrplant_empty = self.create_terrplant_object()
expected_results = [0.3206, 1.00319, 12.32]
        result = pd.Series(dtype='float')  # pre-assign so the finally block cannot raise NameError
        try:
terrplant_empty.noaec_listed_seedling_emergence_dicot = pd.Series([0.3206, 1.0032, 43.4294], dtype='float')
terrplant_empty.noaec_listed_vegetative_vigor_dicot = pd.Series([0.5872, 1.00319, 12.32], dtype='float')
result = terrplant_empty.min_lds_spray()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
# unittest will
# 1) call the setUp method,
# 2) then call every method whose name starts with "test",
# 3) then call the tearDown method
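# A minimal sketch (hypothetical helpers, not part of the terrplant module
# under test) of the three computation patterns the tests above exercise:
# rq_* methods divide an exposure series by a toxicity threshold series,
# min_* methods take the element-wise minimum of two threshold series, and
# loc_* methods map each risk quotient to a "potential risk" sentence when
# rq >= 1.0 and to a "risk is minimal" sentence otherwise.
import pandas as pd  # already imported by this module; repeated so the sketch is self-contained

def _sketch_risk_quotient(exposure, threshold):
    """Element-wise risk quotient: exposure / threshold."""
    return exposure / threshold

def _sketch_min_spray(series_a, series_b):
    """Element-wise minimum of two toxicity series (pure pandas)."""
    return series_a.combine(series_b, min)

def _sketch_level_of_concern(rq, risk_msg, minimal_msg):
    """Pick the level-of-concern sentence for each risk quotient."""
    return pd.Series([risk_msg if value >= 1.0 else minimal_msg for value in rq])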
if __name__ == '__main__':
unittest.main()
| 49.098214
| 122
| 0.618044
| 5,355
| 43,992
| 4.882166
| 0.063866
| 0.066401
| 0.053014
| 0.04131
| 0.917228
| 0.889535
| 0.859662
| 0.784769
| 0.77207
| 0.75742
| 0
| 0.034644
| 0.290053
| 43,992
| 896
| 123
| 49.098214
| 0.802446
| 0.240726
| 0
| 0.599631
| 0
| 0
| 0.161954
| 0
| 0
| 0
| 0
| 0
| 0.060886
| 1
| 0.066421
| false
| 0.00369
| 0.01845
| 0
| 0.149446
| 0.184502
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8932628f1e0bc2d8c1fc917f3837c4fdac64e6f9
| 192
|
py
|
Python
|
wireapp/wireapp/doctype/mpesa_payment/mpesa_payment.py
|
saleemdev/wireapp
|
7d39d07391ddad23539cfdf38369082f708d7294
|
[
"MIT"
] | null | null | null |
wireapp/wireapp/doctype/mpesa_payment/mpesa_payment.py
|
saleemdev/wireapp
|
7d39d07391ddad23539cfdf38369082f708d7294
|
[
"MIT"
] | null | null | null |
wireapp/wireapp/doctype/mpesa_payment/mpesa_payment.py
|
saleemdev/wireapp
|
7d39d07391ddad23539cfdf38369082f708d7294
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, Salim and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class MPESAPayment(Document):
pass
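# (a bare controller: for a Frappe DocType the fields and most behaviour are
#  defined in the accompanying JSON schema, so no server-side hooks are
#  overridden here)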
| 21.333333
| 49
| 0.791667
| 25
| 192
| 6.08
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024242
| 0.140625
| 192
| 8
| 50
| 24
| 0.89697
| 0.541667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
893a18910761a8b9355e949965665e778b901cd2
| 187
|
py
|
Python
|
nixnet/_session/__init__.py
|
ni-ldp/nixnet-python
|
83f30c5b44098de0dc4828838e263b7be0866228
|
[
"MIT"
] | 16
|
2017-06-14T19:44:45.000Z
|
2022-02-06T15:14:52.000Z
|
nixnet/_session/__init__.py
|
ni-ldp/nixnet-python
|
83f30c5b44098de0dc4828838e263b7be0866228
|
[
"MIT"
] | 216
|
2017-06-15T16:41:10.000Z
|
2021-09-23T23:00:50.000Z
|
nixnet/_session/__init__.py
|
ni-ldp/nixnet-python
|
83f30c5b44098de0dc4828838e263b7be0866228
|
[
"MIT"
] | 23
|
2017-06-14T22:51:08.000Z
|
2022-03-03T03:04:40.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import typing # NOQA: F401
__all__ = [] # type: typing.List[typing.Text]
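# (the trailing "# type:" comment is a PEP 484 comment-style annotation kept
#  for Python 2 compatibility; typing is imported above solely for it, hence
#  the "NOQA: F401" marker silencing the unused-import warning)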
| 23.375
| 46
| 0.791444
| 25
| 187
| 5.24
| 0.6
| 0.229008
| 0.366412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01875
| 0.144385
| 187
| 7
| 47
| 26.714286
| 0.79375
| 0.219251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.8
| null | null | 0.2
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8975af95154909e66feae919c1c23a3719a39dc8
| 66,893
|
py
|
Python
|
Assignment2_lastrun.py
|
iamnavya-agg/Emotion-Categorization-experiemnt
|
c7cc1a17cbdb414a07cecddb88b4299a1ba51629
|
[
"MIT"
] | null | null | null |
Assignment2_lastrun.py
|
iamnavya-agg/Emotion-Categorization-experiemnt
|
c7cc1a17cbdb414a07cecddb88b4299a1ba51629
|
[
"MIT"
] | 4
|
2020-03-12T19:22:46.000Z
|
2022-03-12T00:09:38.000Z
|
Assignment2_lastrun.py
|
iamnavya-agg/Emotion-Categorization-experiemnt
|
c7cc1a17cbdb414a07cecddb88b4299a1ba51629
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v3.2.3),
on Sun Oct 13 21:34:23 2019
If you publish work using this script the most relevant publication is:
Peirce J, Gray JR, Simpson S, MacAskill M, Höchenberger R, Sogo H, Kastman E, Lindeløv JK. (2019)
PsychoPy2: Experiments in behavior made easy. Behav Res 51: 195.
https://doi.org/10.3758/s13428-018-01193-y
"""
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
from psychopy import sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
psychopyVersion = '3.2.3'
expName = 'Assignment2' # from the Builder filename that created this script
expInfo = {'participant': '', 'session': '001'}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if not dlg.OK:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath='/Users/pragyagandhi/Desktop/FinalExperiment/Assignment2_lastrun.py',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001 # how close to onset before 'same' frame
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(
size=[1440, 900], fullscr=True, screen=0,
winType='pyglet', allowGUI=False, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
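    # (1.0 / 60.0 = 0.0167 s: the per-frame duration assumed for a 60 Hz display)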
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "Intoduction"
IntoductionClock = core.Clock()
text = visual.TextStim(win=win, name='text',
text='Hello,\nWelcome to the experiment!\nIn this experiment, there are two parts. \nPress enter!',
font='Arial',
pos=(0,0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp = keyboard.Keyboard()
# Initialize components for Routine "trial1"
trial1Clock = core.Clock()
text_9 = visual.TextStim(win=win, name='text_9',
text='Welcome to part 1.\nPress enter to see the rules.',
font='Arial',
pos=(0, 0), height=0.07, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_7 = keyboard.Keyboard()
# Initialize components for Routine "Rules"
RulesClock = core.Clock()
text_6 = visual.TextStim(win=win, name='text_6',
    text='In this experiment, your task is to guess if the shown face was happy or not.\nFirst, you will be shown a face.\nThen, there will be a noise for 0.1 seconds.\nNext, there will be a blank screen for 2 seconds.\nIn this,\n press the right key if the shown face was happy.\n press the left key if the shown face was sad.\n\n\n\nPress enter to start the experiment!',
font='Arial',
pos=(0, 0), height=0.03, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_3 = keyboard.Keyboard()
# Initialize components for Routine "plus"
plusClock = core.Clock()
text_4 = visual.TextStim(win=win, name='text_4',
text='+',
font='Arial',
pos=(0, 0), height=0.2, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "Experiment1"
Experiment1Clock = core.Clock()
imageGuess = visual.ImageStim(
win=win,
name='imageGuess',
image='sin', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
# Initialize components for Routine "sound1"
sound1Clock = core.Clock()
image_2 = visual.ImageStim(
win=win,
name='image_2',
image='noise.jpg', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
# Initialize components for Routine "empty1"
empty1Clock = core.Clock()
text_8 = visual.TextStim(win=win, name='text_8',
text=None,
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_8 = keyboard.Keyboard()
# Initialize components for Routine "trial2"
trial2Clock = core.Clock()
text_3 = visual.TextStim(win=win, name='text_3',
    text='Welcome to Experiment 2.\nPress enter to see the rules.',
font='Arial',
pos=(0, 0), height=0.07, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_4 = keyboard.Keyboard()
# Initialize components for Routine "Rules2"
Rules2Clock = core.Clock()
text_7 = visual.TextStim(win=win, name='text_7',
text="In this experiment you have to tell if the face was happy,sad or angry.\nYou will be shown a face.\nThen, there will be a noise for 0.1 seconds.\nNext,There will be blank screen for 2 seconds.\nYou have to:\n -Press 'h' if the face is happy.\n -Press 's' if the face is sad.\n -Press 'a' if the face is angry.\nPress enter to start the experiment",
font='Arial',
pos=(0, 0), height=0.03, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_5 = keyboard.Keyboard()
# Initialize components for Routine "Plus"
PlusClock = core.Clock()
text_5 = visual.TextStim(win=win, name='text_5',
text='+',
font='Arial',
pos=(0, 0), height=0.2, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "Experiment2"
Experiment2Clock = core.Clock()
image = visual.ImageStim(
win=win,
name='image',
image='sin', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
# Initialize components for Routine "sound2"
sound2Clock = core.Clock()
image_3 = visual.ImageStim(
win=win,
name='image_3',
image='noise.jpg', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
# Initialize components for Routine "empty2"
empty2Clock = core.Clock()
text_10 = visual.TextStim(win=win, name='text_10',
text=None,
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_2 = keyboard.Keyboard()
# Initialize components for Routine "Thankyou"
ThankyouClock = core.Clock()
text_2 = visual.TextStim(win=win, name='text_2',
    text='Thank you!!',
font='Arial',
pos=(0,0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
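# A minimal sketch (hypothetical helper, not called by this generated script)
# of the component-reset block that the Builder output repeats verbatim at the
# start of every routine below:
def _reset_components(components):
    """Clear per-routine timing bookkeeping and mark components NOT_STARTED."""
    for comp in components:
        comp.tStart = None
        comp.tStop = None
        comp.tStartRefresh = None
        comp.tStopRefresh = None
        if hasattr(comp, 'status'):
            comp.status = NOT_STARTED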
# ------Prepare to start Routine "Introduction"-------
routineTimer.add(5.000000)
# update component parameters for each repeat
key_resp.keys = []
key_resp.rt = []
# keep track of which components have finished
IntroductionComponents = [text, key_resp]
for thisComponent in IntroductionComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
IntroductionClock.reset(-_timeToFirstFrame)  # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "Introduction"-------
while continueRoutine and routineTimer.getTime() > 0:
    # get current time
    t = IntroductionClock.getTime()
    tThisFlip = win.getFutureFlipTime(clock=IntroductionClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text* updates
if text.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text.frameNStart = frameN # exact frame index
text.tStart = t # local t and not account for scr refresh
text.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text, 'tStartRefresh') # time at next scr refresh
text.setAutoDraw(True)
if text.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text.tStartRefresh + 5-frameTolerance:
# keep track of stop time/frame for later
text.tStop = t # not accounting for scr refresh
text.frameNStop = frameN # exact frame index
win.timeOnFlip(text, 'tStopRefresh') # time at next scr refresh
text.setAutoDraw(False)
# *key_resp* updates
waitOnFlip = False
if key_resp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp.frameNStart = frameN # exact frame index
key_resp.tStart = t # local t and not account for scr refresh
key_resp.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp, 'tStartRefresh') # time at next scr refresh
key_resp.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > key_resp.tStartRefresh + 5-frameTolerance:
# keep track of stop time/frame for later
key_resp.tStop = t # not accounting for scr refresh
key_resp.frameNStop = frameN # exact frame index
win.timeOnFlip(key_resp, 'tStopRefresh') # time at next scr refresh
key_resp.status = FINISHED
if key_resp.status == STARTED and not waitOnFlip:
theseKeys = key_resp.getKeys(keyList=['return'], waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # at least one key was pressed
# check for quit:
if "escape" == theseKeys:
endExpNow = True
key_resp.keys = theseKeys.name # just the last key pressed
key_resp.rt = theseKeys.rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in IntroductionComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Introduction"-------
for thisComponent in IntroductionComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text.started', text.tStartRefresh)
thisExp.addData('text.stopped', text.tStopRefresh)
# check responses
if key_resp.keys in ['', [], None]: # No response was made
key_resp.keys = None
thisExp.addData('key_resp.keys',key_resp.keys)
if key_resp.keys != None: # we had a response
thisExp.addData('key_resp.rt', key_resp.rt)
thisExp.addData('key_resp.started', key_resp.tStartRefresh)
thisExp.addData('key_resp.stopped', key_resp.tStopRefresh)
thisExp.nextEntry()
# ------Prepare to start Routine "trial1"-------
# update component parameters for each repeat
key_resp_7.keys = []
key_resp_7.rt = []
# keep track of which components have finished
trial1Components = [text_9, key_resp_7]
for thisComponent in trial1Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
trial1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "trial1"-------
while continueRoutine:
# get current time
t = trial1Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=trial1Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_9* updates
if text_9.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_9.frameNStart = frameN # exact frame index
text_9.tStart = t # local t and not account for scr refresh
text_9.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_9, 'tStartRefresh') # time at next scr refresh
text_9.setAutoDraw(True)
# *key_resp_7* updates
waitOnFlip = False
if key_resp_7.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_7.frameNStart = frameN # exact frame index
key_resp_7.tStart = t # local t and not account for scr refresh
key_resp_7.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_7, 'tStartRefresh') # time at next scr refresh
key_resp_7.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_7.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_7.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_7.status == STARTED and not waitOnFlip:
theseKeys = key_resp_7.getKeys(keyList=['return'], waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # at least one key was pressed
# check for quit:
if "escape" == theseKeys:
endExpNow = True
key_resp_7.keys = theseKeys.name # just the last key pressed
key_resp_7.rt = theseKeys.rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trial1Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "trial1"-------
for thisComponent in trial1Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_9.started', text_9.tStartRefresh)
thisExp.addData('text_9.stopped', text_9.tStopRefresh)
# check responses
if key_resp_7.keys in ['', [], None]: # No response was made
key_resp_7.keys = None
thisExp.addData('key_resp_7.keys',key_resp_7.keys)
if key_resp_7.keys != None: # we had a response
thisExp.addData('key_resp_7.rt', key_resp_7.rt)
thisExp.addData('key_resp_7.started', key_resp_7.tStartRefresh)
thisExp.addData('key_resp_7.stopped', key_resp_7.tStopRefresh)
thisExp.nextEntry()
# the Routine "trial1" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "Rules"-------
# update component parameters for each repeat
key_resp_3.keys = []
key_resp_3.rt = []
# keep track of which components have finished
RulesComponents = [text_6, key_resp_3]
for thisComponent in RulesComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
RulesClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "Rules"-------
while continueRoutine:
# get current time
t = RulesClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=RulesClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_6* updates
if text_6.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_6.frameNStart = frameN # exact frame index
text_6.tStart = t # local t and not account for scr refresh
text_6.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_6, 'tStartRefresh') # time at next scr refresh
text_6.setAutoDraw(True)
# *key_resp_3* updates
waitOnFlip = False
if key_resp_3.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_3.frameNStart = frameN # exact frame index
key_resp_3.tStart = t # local t and not account for scr refresh
key_resp_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_3, 'tStartRefresh') # time at next scr refresh
key_resp_3.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_3.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_3.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_3.status == STARTED and not waitOnFlip:
theseKeys = key_resp_3.getKeys(keyList=['return'], waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # at least one key was pressed
# check for quit:
if "escape" == theseKeys:
endExpNow = True
key_resp_3.keys = theseKeys.name # just the last key pressed
key_resp_3.rt = theseKeys.rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in RulesComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Rules"-------
for thisComponent in RulesComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_6.started', text_6.tStartRefresh)
thisExp.addData('text_6.stopped', text_6.tStopRefresh)
# check responses
if key_resp_3.keys in ['', [], None]: # No response was made
key_resp_3.keys = None
thisExp.addData('key_resp_3.keys',key_resp_3.keys)
if key_resp_3.keys != None: # we had a response
thisExp.addData('key_resp_3.rt', key_resp_3.rt)
thisExp.addData('key_resp_3.started', key_resp_3.tStartRefresh)
thisExp.addData('key_resp_3.stopped', key_resp_3.tStopRefresh)
thisExp.nextEntry()
# the Routine "Rules" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=1, method='random',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('psychtest.xlsx'),
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
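# (the exec above binds each column of psychtest.xlsx to a like-named local
#  variable, e.g. a column named Image1 becomes the Image1 that is later
#  passed to imageGuess.setImage inside the trial loop)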
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
# ------Prepare to start Routine "plus"-------
routineTimer.add(0.200000)
# update component parameters for each repeat
# keep track of which components have finished
plusComponents = [text_4]
for thisComponent in plusComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
plusClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "plus"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = plusClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=plusClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_4* updates
if text_4.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_4.frameNStart = frameN # exact frame index
text_4.tStart = t # local t and not account for scr refresh
text_4.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_4, 'tStartRefresh') # time at next scr refresh
text_4.setAutoDraw(True)
if text_4.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_4.tStartRefresh + 0.200-frameTolerance:
# keep track of stop time/frame for later
text_4.tStop = t # not accounting for scr refresh
text_4.frameNStop = frameN # exact frame index
win.timeOnFlip(text_4, 'tStopRefresh') # time at next scr refresh
text_4.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in plusComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "plus"-------
for thisComponent in plusComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials.addData('text_4.started', text_4.tStartRefresh)
trials.addData('text_4.stopped', text_4.tStopRefresh)
# ------Prepare to start Routine "Experiment1"-------
routineTimer.add(0.080000)
# update component parameters for each repeat
imageGuess.setImage(Image1)
# keep track of which components have finished
Experiment1Components = [imageGuess]
for thisComponent in Experiment1Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Experiment1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "Experiment1"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = Experiment1Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=Experiment1Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *imageGuess* updates
if imageGuess.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
imageGuess.frameNStart = frameN # exact frame index
imageGuess.tStart = t # local t and not account for scr refresh
imageGuess.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(imageGuess, 'tStartRefresh') # time at next scr refresh
imageGuess.setAutoDraw(True)
if imageGuess.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > imageGuess.tStartRefresh + 0.08-frameTolerance:
# keep track of stop time/frame for later
imageGuess.tStop = t # not accounting for scr refresh
imageGuess.frameNStop = frameN # exact frame index
win.timeOnFlip(imageGuess, 'tStopRefresh') # time at next scr refresh
imageGuess.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in Experiment1Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Experiment1"-------
for thisComponent in Experiment1Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials.addData('imageGuess.started', imageGuess.tStartRefresh)
trials.addData('imageGuess.stopped', imageGuess.tStopRefresh)
# ------Prepare to start Routine "sound1"-------
routineTimer.add(0.100000)
# update component parameters for each repeat
# keep track of which components have finished
sound1Components = [image_2]
for thisComponent in sound1Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
sound1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "sound1"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = sound1Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=sound1Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *image_2* updates
if image_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
image_2.frameNStart = frameN # exact frame index
image_2.tStart = t # local t and not account for scr refresh
image_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(image_2, 'tStartRefresh') # time at next scr refresh
image_2.setAutoDraw(True)
if image_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > image_2.tStartRefresh + 0.1-frameTolerance:
# keep track of stop time/frame for later
image_2.tStop = t # not accounting for scr refresh
image_2.frameNStop = frameN # exact frame index
win.timeOnFlip(image_2, 'tStopRefresh') # time at next scr refresh
image_2.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in sound1Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "sound1"-------
for thisComponent in sound1Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials.addData('image_2.started', image_2.tStartRefresh)
trials.addData('image_2.stopped', image_2.tStopRefresh)
# ------Prepare to start Routine "empty1"-------
routineTimer.add(2.000000)
# update component parameters for each repeat
key_resp_8.keys = []
key_resp_8.rt = []
# keep track of which components have finished
empty1Components = [text_8, key_resp_8]
for thisComponent in empty1Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
empty1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "empty1"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = empty1Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=empty1Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_8* updates
if text_8.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_8.frameNStart = frameN # exact frame index
text_8.tStart = t # local t and not account for scr refresh
text_8.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_8, 'tStartRefresh') # time at next scr refresh
text_8.setAutoDraw(True)
if text_8.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_8.tStartRefresh + 2.0-frameTolerance:
# keep track of stop time/frame for later
text_8.tStop = t # not accounting for scr refresh
text_8.frameNStop = frameN # exact frame index
win.timeOnFlip(text_8, 'tStopRefresh') # time at next scr refresh
text_8.setAutoDraw(False)
# *key_resp_8* updates
waitOnFlip = False
if key_resp_8.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_8.frameNStart = frameN # exact frame index
key_resp_8.tStart = t # local t and not account for scr refresh
key_resp_8.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_8, 'tStartRefresh') # time at next scr refresh
key_resp_8.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_8.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_8.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_8.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > key_resp_8.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
key_resp_8.tStop = t # not accounting for scr refresh
key_resp_8.frameNStop = frameN # exact frame index
win.timeOnFlip(key_resp_8, 'tStopRefresh') # time at next scr refresh
key_resp_8.status = FINISHED
if key_resp_8.status == STARTED and not waitOnFlip:
theseKeys = key_resp_8.getKeys(keyList=['right', 'left'], waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # at least one key was pressed
# check for quit:
if "escape" == theseKeys:
endExpNow = True
key_resp_8.keys = theseKeys.name # just the last key pressed
key_resp_8.rt = theseKeys.rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in empty1Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "empty1"-------
for thisComponent in empty1Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials.addData('text_8.started', text_8.tStartRefresh)
trials.addData('text_8.stopped', text_8.tStopRefresh)
# check responses
if key_resp_8.keys in ['', [], None]: # No response was made
key_resp_8.keys = None
trials.addData('key_resp_8.keys',key_resp_8.keys)
if key_resp_8.keys != None: # we had a response
trials.addData('key_resp_8.rt', key_resp_8.rt)
trials.addData('key_resp_8.started', key_resp_8.tStartRefresh)
trials.addData('key_resp_8.stopped', key_resp_8.tStopRefresh)
thisExp.nextEntry()
# completed 1 repeats of 'trials'
# ------Prepare to start Routine "trial2"-------
# update component parameters for each repeat
key_resp_4.keys = []
key_resp_4.rt = []
# keep track of which components have finished
trial2Components = [text_3, key_resp_4]
for thisComponent in trial2Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
trial2Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "trial2"-------
while continueRoutine:
# get current time
t = trial2Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=trial2Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_3* updates
if text_3.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_3.frameNStart = frameN # exact frame index
text_3.tStart = t # local t and not account for scr refresh
text_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_3, 'tStartRefresh') # time at next scr refresh
text_3.setAutoDraw(True)
# *key_resp_4* updates
waitOnFlip = False
if key_resp_4.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_4.frameNStart = frameN # exact frame index
key_resp_4.tStart = t # local t and not account for scr refresh
key_resp_4.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_4, 'tStartRefresh') # time at next scr refresh
key_resp_4.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_4.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_4.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_4.status == STARTED and not waitOnFlip:
theseKeys = key_resp_4.getKeys(keyList=['return'], waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # at least one key was pressed
# check for quit:
if "escape" == theseKeys:
endExpNow = True
key_resp_4.keys = theseKeys.name # just the last key pressed
key_resp_4.rt = theseKeys.rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trial2Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "trial2"-------
for thisComponent in trial2Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_3.started', text_3.tStartRefresh)
thisExp.addData('text_3.stopped', text_3.tStopRefresh)
# check responses
if key_resp_4.keys in ['', [], None]: # No response was made
key_resp_4.keys = None
thisExp.addData('key_resp_4.keys',key_resp_4.keys)
if key_resp_4.keys != None: # we had a response
thisExp.addData('key_resp_4.rt', key_resp_4.rt)
thisExp.addData('key_resp_4.started', key_resp_4.tStartRefresh)
thisExp.addData('key_resp_4.stopped', key_resp_4.tStopRefresh)
thisExp.nextEntry()
# the Routine "trial2" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "Rules2"-------
# update component parameters for each repeat
key_resp_5.keys = []
key_resp_5.rt = []
# keep track of which components have finished
Rules2Components = [text_7, key_resp_5]
for thisComponent in Rules2Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Rules2Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "Rules2"-------
while continueRoutine:
# get current time
t = Rules2Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=Rules2Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_7* updates
if text_7.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_7.frameNStart = frameN # exact frame index
text_7.tStart = t # local t and not account for scr refresh
text_7.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_7, 'tStartRefresh') # time at next scr refresh
text_7.setAutoDraw(True)
# *key_resp_5* updates
waitOnFlip = False
if key_resp_5.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_5.frameNStart = frameN # exact frame index
key_resp_5.tStart = t # local t and not account for scr refresh
key_resp_5.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_5, 'tStartRefresh') # time at next scr refresh
key_resp_5.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_5.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_5.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_5.status == STARTED and not waitOnFlip:
theseKeys = key_resp_5.getKeys(keyList=['return'], waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # at least one key was pressed
# check for quit:
if "escape" == theseKeys:
endExpNow = True
key_resp_5.keys = theseKeys.name # just the last key pressed
key_resp_5.rt = theseKeys.rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in Rules2Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Rules2"-------
for thisComponent in Rules2Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_7.started', text_7.tStartRefresh)
thisExp.addData('text_7.stopped', text_7.tStopRefresh)
# check responses
if key_resp_5.keys in ['', [], None]: # No response was made
key_resp_5.keys = None
thisExp.addData('key_resp_5.keys',key_resp_5.keys)
if key_resp_5.keys != None: # we had a response
thisExp.addData('key_resp_5.rt', key_resp_5.rt)
thisExp.addData('key_resp_5.started', key_resp_5.tStartRefresh)
thisExp.addData('key_resp_5.stopped', key_resp_5.tStopRefresh)
thisExp.nextEntry()
# the Routine "Rules2" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
trials_2 = data.TrialHandler(nReps=1, method='random',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('psychtest.xlsx'),
seed=None, name='trials_2')
thisExp.addLoop(trials_2) # add the loop to the experiment
thisTrial_2 = trials_2.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial_2.rgb)
if thisTrial_2 is not None:
for paramName in thisTrial_2:
exec('{} = thisTrial_2[paramName]'.format(paramName))
for thisTrial_2 in trials_2:
currentLoop = trials_2
# abbreviate parameter names if possible (e.g. rgb = thisTrial_2.rgb)
    if thisTrial_2 is not None:
for paramName in thisTrial_2:
exec('{} = thisTrial_2[paramName]'.format(paramName))
# ------Prepare to start Routine "Plus"-------
routineTimer.add(0.200000)
# update component parameters for each repeat
# keep track of which components have finished
PlusComponents = [text_5]
for thisComponent in PlusComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
PlusClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "Plus"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = PlusClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=PlusClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_5* updates
if text_5.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_5.frameNStart = frameN # exact frame index
            text_5.tStart = t  # local t; does not account for screen refresh
text_5.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_5, 'tStartRefresh') # time at next scr refresh
text_5.setAutoDraw(True)
if text_5.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_5.tStartRefresh + 0.2-frameTolerance:
# keep track of stop time/frame for later
text_5.tStop = t # not accounting for scr refresh
text_5.frameNStop = frameN # exact frame index
win.timeOnFlip(text_5, 'tStopRefresh') # time at next scr refresh
text_5.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in PlusComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Plus"-------
for thisComponent in PlusComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials_2.addData('text_5.started', text_5.tStartRefresh)
trials_2.addData('text_5.stopped', text_5.tStopRefresh)
# ------Prepare to start Routine "Experiment2"-------
routineTimer.add(0.080000)
# update component parameters for each repeat
image.setImage(Image1)
# keep track of which components have finished
Experiment2Components = [image]
for thisComponent in Experiment2Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Experiment2Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "Experiment2"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = Experiment2Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=Experiment2Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *image* updates
if image.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
image.frameNStart = frameN # exact frame index
            image.tStart = t  # local t; does not account for screen refresh
image.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(image, 'tStartRefresh') # time at next scr refresh
image.setAutoDraw(True)
if image.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > image.tStartRefresh + 0.08-frameTolerance:
# keep track of stop time/frame for later
image.tStop = t # not accounting for scr refresh
image.frameNStop = frameN # exact frame index
win.timeOnFlip(image, 'tStopRefresh') # time at next scr refresh
image.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in Experiment2Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Experiment2"-------
for thisComponent in Experiment2Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials_2.addData('image.started', image.tStartRefresh)
trials_2.addData('image.stopped', image.tStopRefresh)
# ------Prepare to start Routine "sound2"-------
routineTimer.add(0.100000)
# update component parameters for each repeat
# keep track of which components have finished
sound2Components = [image_3]
for thisComponent in sound2Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
sound2Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "sound2"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = sound2Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=sound2Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *image_3* updates
if image_3.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
image_3.frameNStart = frameN # exact frame index
            image_3.tStart = t  # local t; does not account for screen refresh
image_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(image_3, 'tStartRefresh') # time at next scr refresh
image_3.setAutoDraw(True)
if image_3.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > image_3.tStartRefresh + 0.1-frameTolerance:
# keep track of stop time/frame for later
image_3.tStop = t # not accounting for scr refresh
image_3.frameNStop = frameN # exact frame index
win.timeOnFlip(image_3, 'tStopRefresh') # time at next scr refresh
image_3.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in sound2Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "sound2"-------
for thisComponent in sound2Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials_2.addData('image_3.started', image_3.tStartRefresh)
trials_2.addData('image_3.stopped', image_3.tStopRefresh)
# ------Prepare to start Routine "empty2"-------
routineTimer.add(2.000000)
# update component parameters for each repeat
key_resp_2.keys = []
key_resp_2.rt = []
# keep track of which components have finished
empty2Components = [text_10, key_resp_2]
for thisComponent in empty2Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
empty2Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "empty2"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = empty2Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=empty2Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_10* updates
if text_10.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_10.frameNStart = frameN # exact frame index
            text_10.tStart = t  # local t; does not account for screen refresh
text_10.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_10, 'tStartRefresh') # time at next scr refresh
text_10.setAutoDraw(True)
if text_10.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_10.tStartRefresh + 2.0-frameTolerance:
# keep track of stop time/frame for later
text_10.tStop = t # not accounting for scr refresh
text_10.frameNStop = frameN # exact frame index
win.timeOnFlip(text_10, 'tStopRefresh') # time at next scr refresh
text_10.setAutoDraw(False)
# *key_resp_2* updates
waitOnFlip = False
if key_resp_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_2.frameNStart = frameN # exact frame index
            key_resp_2.tStart = t  # local t; does not account for screen refresh
key_resp_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_2, 'tStartRefresh') # time at next scr refresh
key_resp_2.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_2.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_2.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > key_resp_2.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
key_resp_2.tStop = t # not accounting for scr refresh
key_resp_2.frameNStop = frameN # exact frame index
win.timeOnFlip(key_resp_2, 'tStopRefresh') # time at next scr refresh
key_resp_2.status = FINISHED
if key_resp_2.status == STARTED and not waitOnFlip:
theseKeys = key_resp_2.getKeys(keyList=['a', 's', 'h'], waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # at least one key was pressed
# check for quit:
if "escape" == theseKeys:
endExpNow = True
                key_resp_2.keys = theseKeys.name  # just the first key pressed
key_resp_2.rt = theseKeys.rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in empty2Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "empty2"-------
for thisComponent in empty2Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials_2.addData('text_10.started', text_10.tStartRefresh)
trials_2.addData('text_10.stopped', text_10.tStopRefresh)
# check responses
    if key_resp_2.keys in ['', [], None]:  # No response was made
        key_resp_2.keys = None
    trials_2.addData('key_resp_2.keys', key_resp_2.keys)
    if key_resp_2.keys is not None:  # we had a response
        trials_2.addData('key_resp_2.rt', key_resp_2.rt)
trials_2.addData('key_resp_2.started', key_resp_2.tStartRefresh)
trials_2.addData('key_resp_2.stopped', key_resp_2.tStopRefresh)
thisExp.nextEntry()
# completed 1 repeat of 'trials_2'
# ------Prepare to start Routine "Thankyou"-------
# update component parameters for each repeat
# keep track of which components have finished
ThankyouComponents = [text_2]
for thisComponent in ThankyouComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
ThankyouClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
continueRoutine = True
# -------Run Routine "Thankyou"-------
while continueRoutine:
# get current time
t = ThankyouClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=ThankyouClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_2* updates
if text_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_2.frameNStart = frameN # exact frame index
        text_2.tStart = t  # local t; does not account for screen refresh
text_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_2, 'tStartRefresh') # time at next scr refresh
text_2.setAutoDraw(True)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ThankyouComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Thankyou"-------
for thisComponent in ThankyouComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_2.started', text_2.tStartRefresh)
thisExp.addData('text_2.stopped', text_2.tStopRefresh)
# the Routine "Thankyou" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
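Every routine above is an instance of the same Builder-generated frame loop: prepare components, then draw and flip once per screen refresh until all components finish. A minimal hand-written sketch of that skeleton, assuming a PsychoPy install; the window, stimulus, and 1.0 s duration are illustrative, not taken from the script above:

from psychopy import visual, core

win = visual.Window(fullscr=False)     # illustrative window, not the script's
stim = visual.TextStim(win, text='+')  # illustrative stimulus
clock = core.Clock()

clock.reset()
while clock.getTime() < 1.0:  # run the routine for one second
    stim.draw()               # queue the stimulus for the upcoming frame
    win.flip()                # block until the next screen refresh

win.close()
core.quit()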
==== dataset/ucmayo4.py | GorkemP/labeled-images-for-ulcerative-colitis | MIT | 3,336 bytes ====
import torch
from torch.utils.data import Dataset
from PIL import Image
import os
import glob
class UCMayo4(Dataset):
"""Ulcerative Colitis dataset grouped according to Endoscopic Mayo scoring system"""
def __init__(self, root_dir, transform=None):
"""
root_dir (string): Path to parent folder where class folders are located.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.class_names = []
self.samples = []
self.transform = transform
subFolders = glob.glob(os.path.join(root_dir, "*"))
subFolders.sort()
for folder in subFolders:
className = folder.split("/")[-1]
self.class_names.append(className)
self.number_of_class = len(self.class_names)
for folder in subFolders:
className = folder.split("/")[-1]
image_paths = glob.glob(os.path.join(folder, "*"))
for image_path in image_paths:
image = Image.open(image_path)
image.load()
self.samples.append((image, self.class_names.index(className)))
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample_image = self.samples[idx][0].copy()
if self.transform:
sample_image = self.transform(sample_image)
return (sample_image, self.samples[idx][1])
class UCMayo4Remission(Dataset):
"""
Ulcerative Colitis dataset grouped according to Endoscopic Mayo scoring system
    Based on the remission list given to the constructor, it produces a binary annotation output.
"""
def __init__(self, root_dir, remission=[2, 3], transform=None):
"""
Args:
root_dir (string): Path to parent folder where class folders are located.
            remission (list): Mayo scores (as int) that will be regarded as the non-remission state.
transform (callable, optional): Optional transform to be applied on a sample.
"""
self.number_of_class = 2
self.class_names = []
self.samples = []
self.transform = transform
subFolders = glob.glob(os.path.join(root_dir, "*"))
subFolders.sort()
for folder in subFolders:
className = folder.split("/")[-1]
self.class_names.append(className)
for folder in subFolders:
className = folder.split("/")[-1]
image_paths = glob.glob(os.path.join(folder, "*"))
for image_path in image_paths:
image = Image.open(image_path)
image.load()
label = 0
if self.class_names.index(className) in remission:
label = 1
self.samples.append((image, label))
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample_image = self.samples[idx][0].copy()
# TODO since all images are loaded at constructor, transform can be moved there too
if self.transform:
sample_image = self.transform(sample_image)
return (sample_image, self.samples[idx][1])
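A usage sketch for the two dataset classes above; the root path, image size, and batch size are placeholders, and torchvision is assumed for the transform:

from torch.utils.data import DataLoader
from torchvision import transforms

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

dataset = UCMayo4("data/train", transform=transform)  # placeholder path
loader = DataLoader(dataset, batch_size=16, shuffle=True)

for images, labels in loader:
    # e.g. torch.Size([16, 3, 224, 224]) and torch.Size([16])
    print(images.shape, labels.shape)
    break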
==== pacote-download/ex(1-100)/ex112/utilidadescev/__init__.py | gssouza2051/python-exercicios | MIT | 42 bytes ====
from ex111.utilidadescev import moeda, dado
==== deadtrees/network/extra/resunetplusplus/__init__.py | cwerner/deadtrees | Apache-2.0 | 35 bytes ====
from .model import ResUnetPlusPlus
==== examples/add_module.py | satyavls/simple_mock | MIT | 114 bytes ====
def add_num(x, y):
return x + y
def sub_num(x, y):
return x - y
class MathFunctions(object):
pass
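Given the repository name (simple_mock), a plausible way these helpers get exercised is with unittest.mock; treating add_module as importable is an assumption:

from unittest.mock import patch
import add_module  # assumes the file is on the import path

assert add_module.add_num(2, 3) == 5
assert add_module.sub_num(5, 3) == 2

# swap add_num for a stub inside the block, then restore it on exit
with patch("add_module.add_num", return_value=42):
    assert add_module.add_num(1, 1) == 42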
==== galaxydb/__init__.py | alantelles/galaxydb | MIT | 206 bytes ====
from galaxydb.column import Column
from galaxydb.scheme import Scheme
from galaxydb.logic import Logic
from galaxydb.table import Table
from galaxydb.constants import *
from galaxydb.statics import *
==== cgatpipelines/tools/pipeline_docs/pipeline_rnaseqdiffexpression/trackers/Results.py | kevinrue/cgat-flow | MIT | 3,049 bytes ====
from CGATReport.Tracker import *
from CGATReport.Utils import PARAMS as P
from IsoformReport import *
###############################################################################
# parse params
###############################################################################
DATABASE = P.get('', P.get('sql_backend', 'sqlite:///./csvdb'))
ANNOTATIONS_DATABASE = P.get('annotations_database')
###############################################################################
# trackers
###############################################################################
class DeseqFeatureResultsGenes(IsoformTracker):
pattern = "deseq2_featurecounts__(.*)_genes_results"
def __call__(self, track, slice=None):
statement = '''
SELECT A.control_name, A.treatment_name, A.control_mean,
A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
A.significant FROM deseq2_featurecounts__%(track)s_genes_results
AS A ORDER BY A.significant DESC,
A.l2fold ASC;
'''
return self.getAll(statement)
class EdgerFeatureResultsGenes(IsoformTracker):
pattern = "edger_featurecounts__(.*)_genes_results"
def __call__(self, track, slice=None):
statement = '''
SELECT A.control_name, A.treatment_name, A.control_mean,
A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
A.significant FROM edger_featurecounts__%(track)s_genes_results
AS A ORDER BY A.significant DESC,
A.l2fold ASC
'''
return self.getAll(statement)
class DeseqKallistoResultsGenes(IsoformTracker):
pattern = "deseq2_kallisto__(.*)_genes_results"
def __call__(self, track, slice=None):
statement = '''
SELECT A.control_name, A.treatment_name, A.control_mean,
A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
A.significant FROM deseq2_kallisto__%(track)s_genes_results
AS A ORDER BY A.significant DESC,
A.l2fold ASC
'''
return self.getAll(statement)
class EdgerKallistoResultsGenes(IsoformTracker):
pattern = "edger_kallisto__(.*)_genes_results"
def __call__(self, track, slice=None):
statement = '''
SELECT A.control_name, A.treatment_name, A.control_mean,
A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
A.significant FROM edger_kallisto__%(track)s_genes_results AS
A ORDER BY A.significant DESC,
A.l2fold ASC
'''
return self.getAll(statement)
class SleuthKallistoResultsGenes(IsoformTracker):
pattern = "sleuth_kallisto__(.*)_genes_results"
def __call__(self, track, slice=None):
statement = '''
SELECT A.control_name, A.treatment_name, A.control_mean,
A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
A.significant FROM sleuth_kallisto__%(track)s_genes_results AS
A ORDER BY A.significant DESC,
A.l2fold ASC
'''
return self.getAll(statement)
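The five tracker classes differ only in their table prefix. A behavior-preserving sketch that generates them from one template, assuming CGATReport only needs the pattern attribute and __call__ seen above:

RESULTS_SQL = '''
    SELECT A.control_name, A.treatment_name, A.control_mean,
    A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
    A.significant FROM {prefix}__%(track)s_genes_results
    AS A ORDER BY A.significant DESC,
    A.l2fold ASC
    '''

def make_results_tracker(name, prefix):
    # build one tracker class per table prefix instead of copy-pasting
    statement = RESULTS_SQL.format(prefix=prefix)

    def __call__(self, track, slice=None):
        return self.getAll(statement)

    return type(name, (IsoformTracker,), {
        "pattern": prefix + "__(.*)_genes_results",
        "__call__": __call__,
    })

DeseqFeatureResultsGenes = make_results_tracker(
    "DeseqFeatureResultsGenes", "deseq2_featurecounts")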
==== simulation/device/simulated/air_conditioner/__init__.py | LBNL-ETA/LPDM | BSD-3-Clause-LBNL | 43 bytes ====
from air_conditioner import AirConditioner
==== stp_raet/test/test_communication.py | ArtObr/indy-plenum | Apache-2.0 | 5,838 bytes ====
from ioflo.base.consoling import getConsole
from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer
from raet.raeting import AutoMode, Acceptance
from raet.road.estating import RemoteEstate
from raet.road.stacking import RoadStack
from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote
from stp_core.common.log import getlogger
from stp_core.network.port_dispenser import genHa
logger = getlogger()
def testPromiscuousConnection(tdir):
alpha = RoadStack(name='alpha',
ha=genHa(),
auto=AutoMode.always,
basedirpath=tdir)
beta = RoadStack(name='beta',
ha=genHa(),
main=True,
auto=AutoMode.always,
basedirpath=tdir)
try:
betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)
alpha.addRemote(betaRemote)
alpha.join(uid=betaRemote.uid, cascade=True)
handshake(alpha, beta)
sendMsgs(alpha, beta, betaRemote)
finally:
cleanup(alpha, beta)
def testRaetPreSharedKeysPromiscous(tdir):
alphaSigner = NaclSigner()
betaSigner = NaclSigner()
logger.debug("Alpha's verkey {}".format(alphaSigner.verhex))
logger.debug("Beta's verkey {}".format(betaSigner.verhex))
alpha = RoadStack(name='alpha',
ha=genHa(),
sigkey=alphaSigner.keyhex,
auto=AutoMode.always,
basedirpath=tdir)
beta = RoadStack(name='beta',
ha=genHa(),
sigkey=betaSigner.keyhex,
main=True,
auto=AutoMode.always,
basedirpath=tdir)
try:
betaRemote = RemoteEstate(stack=alpha, ha=beta.ha,
verkey=betaSigner.verhex)
alpha.addRemote(betaRemote)
alpha.allow(uid=betaRemote.uid, cascade=True)
handshake(alpha, beta)
sendMsgs(alpha, beta, betaRemote)
finally:
cleanup(alpha, beta)
def testRaetPreSharedKeysNonPromiscous(tdir):
alphaSigner = NaclSigner()
betaSigner = NaclSigner()
alphaPrivateer = Privateer()
betaPrivateer = Privateer()
logger.debug("Alpha's verkey {}".format(alphaSigner.verhex))
logger.debug("Beta's verkey {}".format(betaSigner.verhex))
alpha = RoadStack(name='alpha',
ha=genHa(),
sigkey=alphaSigner.keyhex,
prikey=alphaPrivateer.keyhex,
auto=AutoMode.never,
basedirpath=tdir)
beta = RoadStack(name='beta',
ha=genHa(),
sigkey=betaSigner.keyhex,
prikey=betaPrivateer.keyhex,
main=True,
auto=AutoMode.never,
basedirpath=tdir)
alpha.keep.dumpRemoteRoleData({
"acceptance": Acceptance.accepted.value,
"verhex": betaSigner.verhex,
"pubhex": betaPrivateer.pubhex
}, "beta")
beta.keep.dumpRemoteRoleData({
"acceptance": Acceptance.accepted.value,
"verhex": alphaSigner.verhex,
"pubhex": alphaPrivateer.pubhex
}, "alpha")
try:
betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)
alpha.addRemote(betaRemote)
alpha.allow(uid=betaRemote.uid, cascade=True)
handshake(alpha, beta)
sendMsgs(alpha, beta, betaRemote)
finally:
cleanup(alpha, beta)
def testConnectionWithHaChanged(tdir):
console = getConsole()
console.reinit(verbosity=console.Wordage.verbose)
alphaSigner = NaclSigner()
betaSigner = NaclSigner()
alphaPrivateer = Privateer()
betaPrivateer = Privateer()
logger.debug("Alpha's verkey {}".format(alphaSigner.verhex))
logger.debug("Beta's verkey {}".format(betaSigner.verhex))
alpha = None
def setupAlpha(ha):
nonlocal alpha
alpha = RoadStack(name='alpha',
ha=ha,
sigkey=alphaSigner.keyhex,
prikey=alphaPrivateer.keyhex,
auto=AutoMode.never,
basedirpath=tdir)
alpha.keep.dumpRemoteRoleData({
"acceptance": Acceptance.accepted.value,
"verhex": betaSigner.verhex,
"pubhex": betaPrivateer.pubhex
}, "beta")
oldHa = genHa()
setupAlpha(oldHa)
beta = RoadStack(name='beta',
ha=genHa(),
sigkey=betaSigner.keyhex,
prikey=betaPrivateer.keyhex,
main=True,
auto=AutoMode.never,
basedirpath=tdir, mutable=True)
beta.keep.dumpRemoteRoleData({
"acceptance": Acceptance.accepted.value,
"verhex": alphaSigner.verhex,
"pubhex": alphaPrivateer.pubhex
}, "alpha")
try:
betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)
alpha.addRemote(betaRemote)
alpha.join(uid=betaRemote.uid, cascade=True)
handshake(alpha, beta)
sendMsgs(alpha, beta, betaRemote)
logger.debug("beta knows alpha as {}".
format(getRemote(beta, "alpha").ha))
cleanup(alpha)
newHa = genHa()
logger.debug("alpha changing ha to {}".format(newHa))
setupAlpha(newHa)
betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)
alpha.addRemote(betaRemote)
alpha.join(uid=betaRemote.uid, cascade=True)
handshake(alpha, beta)
sendMsgs(alpha, beta, betaRemote)
logger.debug("beta knows alpha as {}".
format(getRemote(beta, "alpha").ha))
finally:
cleanup(alpha, beta)
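All four tests take a tdir argument; the project supplies its own fixture, but a minimal conftest sketch built on pytest's tmp_path would satisfy the signature:

# conftest.py sketch (hypothetical; the real suite defines its own tdir)
import pytest

@pytest.fixture
def tdir(tmp_path):
    # a fresh base directory per test for RoadStack key and role data
    return str(tmp_path)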
==== docsie_universal_importer/providers/google_drive/__init__.py | Zarif99/test-universal | BSD-3-Clause | 98 bytes ====
default_app_config = 'docsie_universal_importer.providers.google_drive.apps.GoogleDriveAppConfig'
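default_app_config has been deprecated since Django 3.2; on modern Django the equivalent is a plain AppConfig in apps.py, which is auto-discovered without the variable above. A sketch reusing the class and dotted path named in the string:

# apps.py sketch
from django.apps import AppConfig

class GoogleDriveAppConfig(AppConfig):
    name = "docsie_universal_importer.providers.google_drive"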
==== {{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/utils/context_processor.py | abahnihi/kn-django-cookiecutter | MIT | 190 bytes ====
from django.conf import settings
def google_analytics(request):
return {'GOOGLE_ANALYTICS': settings.GOOGLE_ANALYTICS}
def debug_state(request):
return {'DEBUG': settings.DEBUG}
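For these processors to run, they have to be registered in the template settings. A sketch follows; the dotted prefix is a placeholder, since the real package name is templated by cookiecutter:

# settings.py sketch; "myproject" is a placeholder for the templated package
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "myproject.utils.context_processor.google_analytics",
                "myproject.utils.context_processor.debug_state",
            ],
        },
    },
]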
==== utilities/error.py | pskanade/stretch | MIT | 208 bytes ====
class Error():
def __init__(self):
print("An error has occured !")
class TypeError(Error):
def __init__(self):
print("This is Type Error\nThere is a type mismatch.. ! Please fix it.")
==== jsonate/exceptions.py | weswil07/JSONate | MIT | 40 bytes ====
class CouldntSerialize(Exception): pass
==== src/pyrouge/rouge/pyrouge/__init__.py | bzhao2718/PreSumm | MIT | 69 bytes ====
from pyrouge.base import Doc, Sent
from pyrouge.rouge import Rouge155
==== tests/unit/saltenv/ops/test_unit_get_current_version.py | eitrtechnologies/saltenv | Apache-2.0 | 6,982 bytes ====
from unittest.mock import MagicMock
from unittest.mock import patch
import aiofiles
from aiofiles import threadpool
async def test_unit_get_current_version_both_files_dont_exist(mock_hub, hub, tmp_path):
"""
SCENARIO #1
- override_version_file DOES NOT EXIST
- main_version_file DOES NOT EXIST
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
# Set the saltenv_dir as a nonexistent directory
mock_hub.OPT.saltenv.saltenv_dir = "nonexistent_testing_dir"
# Patch os.getcwd() to be the mock directory
with patch("os.getcwd", return_value=tmp_path) as mock_cwd:
# Patch the exists function to return False for both times it is called
with patch("pathlib.PosixPath.exists", side_effect=[False, False]) as mock_exists:
expected = ("", "")
actual = await mock_hub.saltenv.ops.get_current_version()
            assert actual == expected
# Ensure every mocked function was called the appropriate number of times
mock_cwd.assert_called_once()
assert mock_exists.call_count == 2
async def test_unit_get_current_version_only_override_exists(mock_hub, hub, tmp_path):
"""
SCENARIO #2
- override_version_file DOES EXIST
- main_version_file DOES NOT EXIST
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
# Set the saltenv_dir as a nonexistent directory
mock_hub.OPT.saltenv.saltenv_dir = "nonexistent_testing_dir"
# Patch os.getcwd() to be the mock directory
with patch("os.getcwd", return_value=tmp_path) as mock_cwd:
# Patch exists to return True the first call and False the second call
with patch("pathlib.PosixPath.exists", side_effect=[True, False]) as mock_exists:
# Register the return type with aiofiles.threadpool.wrap dispatcher
aiofiles.threadpool.wrap.register(MagicMock)(
lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs)
)
# Mock the file returned by aiofiles.open
mock_override_version = "3004"
mock_file = MagicMock()
with patch("aiofiles.threadpool.sync_open", return_value=mock_file) as mock_open:
# Set the value of read() to be the mock version
mock_file.read.return_value = mock_override_version
# Call get_current_version
expected = (mock_override_version, tmp_path / ".salt-version")
actual = await mock_hub.saltenv.ops.get_current_version()
                assert actual == expected
# Ensure every mocked function was called the appropriate number of times
mock_cwd.assert_called_once()
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_file.read.assert_called_once()
async def test_unit_get_current_version_only_main_exists(mock_hub, hub, tmp_path):
"""
SCENARIO #3
- override_version_file DOES NOT EXIST
- main_version_file DOES EXIST
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
# Set the saltenv_dir as the mock directory
mock_hub.OPT.saltenv.saltenv_dir = tmp_path
# Patch os.getcwd() to be the nonexistent directory
with patch("os.getcwd", return_value="nonexistent_testing_dir") as mock_cwd:
# Patch exists to return False the first call and True the second call
with patch("pathlib.PosixPath.exists", side_effect=[False, True]) as mock_exists:
# Register the return type with aiofiles.threadpool.wrap dispatcher
aiofiles.threadpool.wrap.register(MagicMock)(
lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs)
)
# Mock the file returned by aiofiles.open
mock_main_version = "3003"
mock_file = MagicMock()
with patch("aiofiles.threadpool.sync_open", return_value=mock_file) as mock_open:
# Set the value of read() to be the mock version
mock_file.read.return_value = mock_main_version
# Call get_current_version
expected = (mock_main_version, tmp_path / "version")
actual = await mock_hub.saltenv.ops.get_current_version()
                assert actual == expected
# Ensure every mocked function was called the appropriate number of times
mock_cwd.assert_called_once()
assert mock_exists.call_count == 2
mock_open.assert_called_once()
mock_file.read.assert_called_once()
async def test_unit_get_current_version_both_files_exist(mock_hub, hub, tmp_path):
"""
SCENARIO #4
- override_version_file DOES EXIST
- main_version_file DOES EXIST
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.get_current_version = hub.saltenv.ops.get_current_version
# Set the saltenv_dir as the mock directory
mock_hub.OPT.saltenv.saltenv_dir = tmp_path
# Patch os.getcwd() to be the mock directory
with patch("os.getcwd", return_value=tmp_path) as mock_cwd:
# Patch exists to return True for both calls
with patch("pathlib.PosixPath.exists", side_effect=[True, True]) as mock_exists:
# Register the return type with aiofiles.threadpool.wrap dispatcher
aiofiles.threadpool.wrap.register(MagicMock)(
lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs)
)
# Mock the file returned by aiofiles.open
mock_override_version = "3004"
mock_override_file = MagicMock()
# Set the value of read() to "3004"
mock_override_file.read.return_value = mock_override_version
            mock_main_version = "3003"
            mock_main_file = MagicMock()
            # Set the value of read() to "3003"
            mock_main_file.read.return_value = mock_main_version
# Set the open() to return the mocked file for override and then the mocked file for main
with patch(
"aiofiles.threadpool.sync_open", side_effect=[mock_override_file, mock_main_file]
) as mock_open:
# Call get_current_version
expected = (mock_override_version, tmp_path / ".salt-version")
actual = await mock_hub.saltenv.ops.get_current_version()
                assert actual == expected
# Ensure every mocked function was called the appropriate number of times
mock_cwd.assert_called_once()
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_override_file.read.assert_called_once()
assert mock_main_file.read.call_count == 0
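All four tests are coroutines, so plain pytest will not await them; one way to run them, assuming pytest-asyncio (whether this project uses it is not stated here):

# pytest.ini sketch
[pytest]
asyncio_mode = auto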
==== epytope/Data/pssms/tepitopepan/mat/DRB1_1227_9.py | christopher-mohr/epytope | BSD-3-Clause | 2,168 bytes ====
DRB1_1227_9 = {
    0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.99657, 'I': -0.003434, 'H': -999.0, 'K': -999.0, 'M': -0.003434, 'L': -0.003434, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.99657, 'V': -0.003434, 'Y': -0.99657},
    1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9},
    2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8},
    3: {'A': 0.0, 'E': -1.3194, 'D': -1.3491, 'G': -1.3606, 'F': 0.48475, 'I': 0.46988, 'H': -0.54865, 'K': 0.88535, 'M': 1.1587, 'L': 0.83677, 'N': 0.0041609, 'Q': -0.56024, 'P': -1.3612, 'S': -0.82154, 'R': 0.73574, 'T': -0.82984, 'W': 0.032588, 'V': 0.21286, 'Y': 0.71588},
    4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0},
    5: {'A': 0.0, 'E': -1.4087, 'D': -2.3867, 'G': -0.70627, 'F': -1.3964, 'I': 0.69222, 'H': -0.11208, 'K': 1.2652, 'M': -0.90101, 'L': 0.18823, 'N': -0.58182, 'Q': -0.31126, 'P': 0.4949, 'S': -0.089495, 'R': 0.96923, 'T': 0.80924, 'W': -1.3956, 'V': 1.1961, 'Y': -1.3995},
    6: {'A': 0.0, 'E': -1.5721, 'D': -2.4641, 'G': -0.49836, 'F': -0.45015, 'I': 0.22862, 'H': -0.38461, 'K': -0.38479, 'M': 0.73093, 'L': 0.85457, 'N': -0.97365, 'Q': -1.0401, 'P': -0.41067, 'S': -1.2228, 'R': -0.3597, 'T': -1.5512, 'W': -0.58124, 'V': -0.68614, 'Y': -0.57573},
    7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0},
    8: {'A': 0.0, 'E': -0.57458, 'D': -0.74397, 'G': -0.45401, 'F': -0.38119, 'I': 0.049005, 'H': 0.38856, 'K': -0.55169, 'M': 0.20574, 'L': -0.3601, 'N': -0.66333, 'Q': 0.60568, 'P': -1.0494, 'S': 0.67896, 'R': -0.85656, 'T': -0.77128, 'W': -0.6218, 'V': -0.36764, 'Y': -0.42878},
}
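The matrix maps each of the nine binding-core positions to per-residue scores; summing position scores over a 9-mer is the usual TEPITOPE-style convention, stated here as an assumption. Note the matrix covers 19 residues, so a peptide containing 'C' would raise a KeyError:

def score_9mer(peptide, pssm=DRB1_1227_9):
    # sum the per-position score of each residue in the 9-mer core
    assert len(peptide) == 9
    return sum(pssm[i][aa] for i, aa in enumerate(peptide))

print(score_9mer("FKQMVLRSY"))  # illustrative peptide, not from the source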
==== etl/parsers/etw/Microsoft_Windows_UAC_FileVirtualization.py | IMULMUL/etl-parser | Apache-2.0 | 13,261 bytes ====
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-UAC-FileVirtualization
GUID : c02afc2b-e24e-4449-ad76-bcc2c2575ead
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2000_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2001, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2001_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2002_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2003, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2003_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2004, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2004_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2005, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2005_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2006, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2006_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2007, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2007_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2008, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2008_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2009, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2009_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2010, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2010_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2011, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2011_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2012, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2012_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2013, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2013_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2014, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2014_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2015, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2015_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2016, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2016_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2017, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2017_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2018, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2018_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2019, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2019_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4000_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul,
"IrpMajorFunction" / Int8ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4001, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4001_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"TargetFileNameLength" / Int16ul,
"TargetFileNameBuffer" / Bytes(lambda this: this.TargetFileNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4002_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5000_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul,
"IrpMajorFunction" / Int8ul,
"Exclusions" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5002_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5003, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5003_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5004, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5004_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
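A hedged aside, not part of the provider definitions above: a minimal sketch of how these self-sizing patterns parse, assuming only the construct library that this module already uses. Each fixed-width length field is parsed first, and the lambda then reads that value back out of the partially-built context to size the variable-length Bytes field that follows. The two-field struct below is illustrative, not one of the provider's events.

from construct import Bytes, Int16ul, Struct

# A length-prefixed buffer, the same shape as the FileName fields above.
demo = Struct(
    "FileNameLength" / Int16ul,                                  # little-endian length prefix
    "FileNameBuffer" / Bytes(lambda this: this.FileNameLength),  # sized by the field just parsed
)

parsed = demo.parse(b"\x04\x00abcd")  # 0x0004 little-endian, then 4 payload bytes
assert parsed.FileNameBuffer == b"abcd"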
| 37.780627
| 123
| 0.681623
| 1,283
| 13,261
| 6.91894
| 0.06781
| 0.079306
| 0.108145
| 0.136983
| 0.936465
| 0.936465
| 0.93241
| 0.780331
| 0.780331
| 0.780331
| 0
| 0.090507
| 0.199306
| 13,261
| 350
| 124
| 37.888571
| 0.745526
| 0.008069
| 0
| 0.686207
| 0
| 0
| 0.235907
| 0.144237
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013793
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c3cd50d3bef2109a7394181b196687c2fce15100
| 24
|
py
|
Python
|
catkin_ws/src/00-infrastructure/easy_regression/include/easy_regression/processors/__init__.py
|
yxiao1996/dev
|
e2181233aaa3d16c472b792b58fc4863983825bd
|
[
"CC-BY-2.0"
] | 2
|
2018-06-25T02:51:25.000Z
|
2018-06-25T02:51:27.000Z
|
catkin_ws/src/00-infrastructure/easy_regression/include/easy_regression/processors/__init__.py
|
yxiao1996/dev
|
e2181233aaa3d16c472b792b58fc4863983825bd
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/src/00-infrastructure/easy_regression/include/easy_regression/processors/__init__.py
|
yxiao1996/dev
|
e2181233aaa3d16c472b792b58fc4863983825bd
|
[
"CC-BY-2.0"
] | 2
|
2018-09-04T06:44:21.000Z
|
2018-10-15T02:30:50.000Z
|
from .identity import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 2
| 23
| 12
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c3d3a48562b302ec3d1c4f7d9f346e8c2423f4ac
| 78
|
py
|
Python
|
segmentation_tools/__init__.py
|
shiwei23/ImageAnalysis3
|
1d2aa1721d188c96feb55b22fc6c9929d7073f49
|
[
"MIT"
] | 3
|
2018-10-10T22:15:10.000Z
|
2020-11-20T15:17:45.000Z
|
segmentation_tools/__init__.py
|
shiwei23/ImageAnalysis3
|
1d2aa1721d188c96feb55b22fc6c9929d7073f49
|
[
"MIT"
] | 2
|
2019-10-31T13:29:05.000Z
|
2021-08-12T17:32:32.000Z
|
segmentation_tools/__init__.py
|
shiwei23/ImageAnalysis3
|
1d2aa1721d188c96feb55b22fc6c9929d7073f49
|
[
"MIT"
] | 2
|
2020-06-04T18:40:52.000Z
|
2022-03-18T15:53:05.000Z
|
# Functions to segment chromosomes
from . import chromosome
from . import cell
| 26
| 34
| 0.807692
| 10
| 78
| 6.3
| 0.8
| 0.31746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 78
| 3
| 35
| 26
| 0.954545
| 0.410256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c3da32e04dd68552d6766ba134d4dbed387f0a82
| 2,051
|
py
|
Python
|
test.py
|
ndwuhuangwei/py-radio-autoencoder
|
842cd1f14a17ee0798766dffcf132950a9e745bd
|
[
"CC0-1.0"
] | null | null | null |
test.py
|
ndwuhuangwei/py-radio-autoencoder
|
842cd1f14a17ee0798766dffcf132950a9e745bd
|
[
"CC0-1.0"
] | null | null | null |
test.py
|
ndwuhuangwei/py-radio-autoencoder
|
842cd1f14a17ee0798766dffcf132950a9e745bd
|
[
"CC0-1.0"
] | 1
|
2021-09-06T14:05:53.000Z
|
2021-09-06T14:05:53.000Z
|
import math
import random
import numpy as np
# First, generate a random source value
def random_sources():
    random_sources = random.randint(0, 15)  # 4 data bits, so 0..15 (the original 0..16 could overflow 4 bits)
    print('The random number is', random_sources)
    return hanming(random_sources)
    # return bin(int(random_sources))
# Encode: use XOR rules to generate a (7,4) Hamming codeword with parity check bits
# def hanming(code_0):
#     # Convert the decimal number to binary
#     code1 = bin(int(code_0))
#     code = str(code1)[2:]
#     print('{0} in binary is'.format(code_0), code)
#     # Check whether the data reaches 4 bits; zero-pad on the left if not
#     while len(code) < 4:
#         code = '0' + code
#     # Convert the codeword into list form for easier manipulation
#     # print('after padding to 4 bits', code)
#     code_list = list(code)
#     # Encoding structure, i.e. the codeword, of the (7,4) linear block Hamming code
#     code_1 = int(code_list[0]) ^ int(code_list[2]) ^ int(code_list[3])
#     code_2 = int(code_list[0]) ^ int(code_list[1]) ^ int(code_list[2])
#     code_4 = int(code_list[1]) ^ int(code_list[2]) ^ int(code_list[3])
#     code_list.insert(0, str(code_1))
#     code_list.insert(1, str(code_2))
#     code_list.insert(2, str(code_4))
#     hanming_code = ''.join(code_list)
#     print('Generated (7,4) Hamming codeword: ' + hanming_code)
#     return code_list
def hanming(code_0):
    # Convert the decimal number to binary
    code1 = bin(int(code_0))
    code = str(code1)[2:]
    print('{0} in binary is'.format(code_0), code)
    # Check whether the data reaches 4 bits; zero-pad on the left if not
    while len(code) < 4:
        code = '0' + code
    # Convert the codeword into list form for easier manipulation
    # print('after padding to 4 bits', code)
    code_list = list(code)
    # Encoding structure, i.e. the codeword, of the (7,4) linear block Hamming code
    code_1 = int(code_list[0]) ^ int(code_list[1]) ^ int(code_list[3]) ^ 1
    code_2 = int(code_list[0]) ^ int(code_list[2]) ^ int(code_list[3]) ^ 1
    code_4 = int(code_list[1]) ^ int(code_list[2]) ^ int(code_list[3]) ^ 1
    code_list.insert(0, str(code_1))
    code_list.insert(1, str(code_2))
    code_list.insert(3, str(code_4))
    hanming_code = ''.join(code_list)
    print('Generated (7,4) Hamming codeword: ' + hanming_code)
    return code_list
if __name__ == '__main__':
    # x is the original signal, the generated (7,4) Hamming code
    # x1 = random_sources()
    x1 = hanming(3)
    print(x1)
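A hedged aside, not part of the original file: a minimal syndrome check that matches the odd-parity codeword layout hanming() produces, [p1, p2, d0, p4, d1, d2, d3]. A syndrome of 0 means no detected error; for a single flipped bit, the syndrome value is that bit's 1-indexed position in the codeword.

def hamming_syndrome(code_list):
    # Layout produced by hanming() above, with each parity bit inverted
    # (the trailing ^ 1 in the encoder), hence the matching ^ 1 here.
    b = [int(x) for x in code_list]
    s1 = b[0] ^ b[2] ^ b[4] ^ b[6] ^ 1  # covers codeword positions 1, 3, 5, 7
    s2 = b[1] ^ b[2] ^ b[5] ^ b[6] ^ 1  # covers codeword positions 2, 3, 6, 7
    s4 = b[3] ^ b[4] ^ b[5] ^ b[6] ^ 1  # covers codeword positions 4, 5, 6, 7
    return (s4 << 2) | (s2 << 1) | s1

assert hamming_syndrome(hanming(3)) == 0  # a clean codeword yields syndrome 0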
| 31.553846
| 100
| 0.592394
| 299
| 2,051
| 3.842809
| 0.173913
| 0.208877
| 0.172324
| 0.062663
| 0.78416
| 0.78416
| 0.78242
| 0.78242
| 0.781549
| 0.772846
| 0
| 0.053247
| 0.249147
| 2,051
| 64
| 101
| 32.046875
| 0.692857
| 0.517796
| 0
| 0
| 0
| 0
| 0.037895
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.115385
| 0
| 0.269231
| 0.153846
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c3e416fee43806fffc3e5957dc5258f61a408baa
| 12,483
|
py
|
Python
|
drizzlepac/run_hla_flag_filter.py
|
srodney/drizzlepac
|
c554523331a6204ce113d4317b7286ad39094f74
|
[
"BSD-3-Clause"
] | 2
|
2020-02-10T16:15:58.000Z
|
2021-03-24T20:08:03.000Z
|
drizzlepac/run_hla_flag_filter.py
|
srodney/drizzlepac
|
c554523331a6204ce113d4317b7286ad39094f74
|
[
"BSD-3-Clause"
] | null | null | null |
drizzlepac/run_hla_flag_filter.py
|
srodney/drizzlepac
|
c554523331a6204ce113d4317b7286ad39094f74
|
[
"BSD-3-Clause"
] | 1
|
2020-09-02T18:08:39.000Z
|
2020-09-02T18:08:39.000Z
|
#!/usr/bin/env python
"""This script simply calls drizzlepac/hlautils/hla_flag_filter.py for test purposes"""
import json
import glob
import os
import pdb
import sys
from astropy.table import Table
import drizzlepac
from drizzlepac.hlautils import config_utils
from drizzlepac.hlautils import poller_utils
def run_hla_flag_filter():
from drizzlepac.hlautils import hla_flag_filter
# + + + + + + + + + + + + + + + + + + + + + + + + + + + +
# All below lines are to get it working, not actual final code.
out_file = glob.glob("??????.out")[0]
# out_file = "j92c01.out" # acs_10265_01
# #out_file = "j9es06.out" # acs_10595_06
# Get parameter values
if os.getcwd().endswith("orig"): sys.exit("Don't run in the orig dir! YOU'LL RUIN EVERYTHING!")
for cmd in ['rm -f *.*', 'cp orig/* .']:
print(cmd)
os.system(cmd)
obs_info_dict, total_list = poller_utils.interpret_obset_input(out_file)
out_pars_file = "pars.json"
for total_item in total_list:
total_item.configobj_pars = config_utils.HapConfig(total_item, output_custom_pars_file=out_pars_file,
use_defaults=True)
for filter_item in total_item.fdp_list:
filter_item.configobj_pars = config_utils.HapConfig(filter_item, output_custom_pars_file=out_pars_file,
use_defaults=True)
for expo_item in total_item.edp_list:
expo_item.configobj_pars = config_utils.HapConfig(expo_item, output_custom_pars_file=out_pars_file,
use_defaults=True)
# * * * * hla_flag_filter.run_source_list_flagging inputs for HLA Classic test run* * * *
if out_file == "j92c01.out": # acs_10265_01
# settings for testing ~/Documents/HLAtransition/runhlaprocessing_testing/acs_10265_01/flag_testing/hla
mode = "dao"
drizzled_image = "hst_10265_01_acs_wfc_f606w_drz.fits"
flt_list = ["j92c01b4q_flc.fits", "j92c01b5q_flc.fits", "j92c01b7q_flc.fits", "j92c01b9q_flc.fits"]
param_dict = total_list[0].fdp_list[0].configobj_pars.as_single_giant_dict()
param_dict['quality control']['ci filter']['sourcex_bthresh'] = 5.0 # force it to use the value from HLA classic
param_dict['quality control']['ci filter']['dao_bthresh'] = 5.0 # force it to use the value from HLA classic
exptime = 5060.0
catalog_name = "hst_10265_01_acs_wfc_f606w_{}phot.txt".format(mode)
catalog_data = Table.read(catalog_name, format='ascii')
proc_type = "{}phot".format(mode)
drz_root_dir = os.getcwd()
# for filt_key in filter_sorted_flt_dict.keys(): flt_list = filter_sorted_flt_dict[filt_key]
# os.remove("hst_10265_01_acs_wfc_f606w_msk.fits")
# from devutils import make_mask_file
# make_mask_file.make_mask_file_old(all_drizzled_filelist[0].replace("drz.fits","wht.fits"))
comp_cmd = "python /Users/dulude/Documents/Code/HLATransition/drizzlepac/drizzlepac/devutils/comparison_tools/compare_sourcelists.py orig/hst_10265_01_acs_wfc_f606w_{}phot_orig.txt hst_10265_01_acs_wfc_f606w_{}phot.txt -i hst_10265_01_acs_wfc_f606w_drz.fits hst_10265_01_acs_wfc_f606w_drz.fits -m absolute -p none".format(mode,mode)
if out_file == "j9es06.out": # acs_10595_06
# settings for testing ~/Documents/HLAtransition/runhlaprocessing_testing/acs_10595_06_flag_testing/
mode = "sex"
drizzled_image = "hst_10595_06_acs_wfc_f435w_drz.fits"
flt_list = ["j9es06rbq_flc.fits", "j9es06rcq_flc.fits", "j9es06req_flc.fits", "j9es06rgq_flc.fits"]
param_dict = total_list[0].fdp_list[0].configobj_pars.as_single_giant_dict()
param_dict['quality control']['ci filter']['sourcex_bthresh'] = 5.0 #force it to use the value from HLA classic
param_dict['quality control']['ci filter']['dao_bthresh'] = 5.0 # force it to use the value from HLA classic
exptime = 710.0
        catalog_name = "hst_10595_06_acs_wfc_f435w_{}phot.txt".format(mode)
        catalog_data = Table.read(catalog_name, format='ascii')
proc_type = "{}phot".format(mode)
drz_root_dir = os.getcwd()
# os.remove("hst_10595_06_acs_wfc_f435w_msk.fits")
# from devutils import make_mask_file
# make_mask_file.make_mask_file("hst_10595_06_acs_wfc_f435w_wht.fits")
comp_cmd = "python /Users/dulude/Documents/Code/HLATransition/drizzlepac/drizzlepac/devutils/comparison_tools/compare_sourcelists.py orig_cats/hst_10595_06_acs_wfc_f435w_{}phot.txt hst_10595_06_acs_wfc_f435w_{}phot.txt -i hst_10595_06_acs_wfc_f435w_drz.fits hst_10595_06_acs_wfc_f435w_drz.fits -m absolute -p none".format(mode,mode)
# + + + + + + + + + + + + + + + + + + + + + + + + + + + +
# Execute hla_flag_filter.run_source_list_flaging
catalog_data = hla_flag_filter.run_source_list_flaging(drizzled_image, flt_list,
param_dict, exptime,
catalog_name, catalog_data,
proc_type, drz_root_dir, debug = True)
catalog_data.write(catalog_name, delimiter=",",format='ascii',overwrite=True)
print("Wrote {}".format(catalog_name))
try:
os.system(comp_cmd)
except:
print("skipping automatic comparision run")
#=======================================================================================================================
def run_hla_flag_filter_HLAClassic():
from drizzlepac.hlautils import hla_flag_filter_HLAClassic
# + + + + + + + + + + + + + + + + + + + + + + + + + + + +
# All below lines are to get it working, not actual final code.
out_file = glob.glob("??????.out")[0]
# out_file = "j92c01.out" # acs_10265_01
# #out_file = "j9es06.out" # acs_10595_06
# Get parameter values
if os.getcwd().endswith("orig"): sys.exit("Don't run in the orig dir! YOU'LL RUIN EVERYTHING!")
for cmd in ['rm -f *.*', 'cp orig/* .']:
print(cmd)
os.system(cmd)
obs_info_dict, total_list = poller_utils.interpret_obset_input(out_file)
out_pars_file = "pars.json"
for total_item in total_list:
total_item.configobj_pars = config_utils.HapConfig(total_item, output_custom_pars_file=out_pars_file,
use_defaults=True)
for filter_item in total_item.fdp_list:
filter_item.configobj_pars = config_utils.HapConfig(filter_item, output_custom_pars_file=out_pars_file,
use_defaults=True)
for expo_item in total_item.edp_list:
expo_item.configobj_pars = config_utils.HapConfig(expo_item, output_custom_pars_file=out_pars_file,
use_defaults=True)
# * * * * hla_flag_filter.run_source_list_flagging inputs for HLA Classic test run* * * *
if out_file == "j92c01.out": # acs_10265_01
# settings for testing ~/Documents/HLAtransition/runhlaprocessing_testing/acs_10265_01/flag_testing/hla
mode = "dao"
all_drizzled_filelist = ["hst_10265_01_acs_wfc_f606w_drz.fits"]
working_hla_red = os.getcwd()
filter_sorted_flt_dict = {"f606w": ["j92c01b4q_flc.fits", "j92c01b5q_flc.fits", "j92c01b7q_flc.fits", "j92c01b9q_flc.fits"]}
param_dict = total_list[0].fdp_list[0].configobj_pars.as_single_giant_dict()
param_dict['quality control']['ci filter']['sourcex_bthresh'] = 5.0 # force it to use the value from HLA classic
param_dict['quality control']['ci filter']['dao_bthresh'] = 5.0 # force it to use the value from HLA classic
readnoise_dictionary_drzs = {"hst_10265_01_acs_wfc_f606w_drz.fits": 4.97749985}
scale_dict_drzs = {"hst_10265_01_acs_wfc_f606w_drz.fits": 0.05}
zero_point_AB_dict = {"hst_10265_01_acs_wfc_f606w_drz.fits": 26.5136022236}
exp_dictionary_scis = {"hst_10265_01_acs_wfc_f606w_drz.fits": 5060.0}
detection_image = "hst_10265_01_acs_wfc_total_drz.fits"
dict_newTAB_matched2drz = {"hst_10265_01_acs_wfc_f606w_drz.fits": "hst_10265_01_acs_wfc_f606w_{}phot.txt".format(mode)}
phot_table_matched2cat = {all_drizzled_filelist[0]: Table.read(dict_newTAB_matched2drz[all_drizzled_filelist[0]], format='ascii')}
proc_type = "{}phot".format(mode)
drz_root_dir = os.getcwd()
rms_dict = {"hst_10265_01_acs_wfc_f606w_drz.fits": "hst_10265_01_acs_wfc_f606w_rms.fits"}
# for filt_key in filter_sorted_flt_dict.keys(): flt_list = filter_sorted_flt_dict[filt_key]
# os.remove("hst_10265_01_acs_wfc_f606w_msk.fits")
# from devutils import make_mask_file
# make_mask_file.make_mask_file_old(all_drizzled_filelist[0].replace("drz.fits","wht.fits"))
comp_cmd = "python /Users/dulude/Documents/Code/HLATransition/drizzlepac/drizzlepac/devutils/comparison_tools/compare_sourcelists.py orig/hst_10265_01_acs_wfc_f606w_{}phot_orig.txt hst_10265_01_acs_wfc_f606w_{}phot.txt -i hst_10265_01_acs_wfc_f606w_drz.fits hst_10265_01_acs_wfc_f606w_drz.fits -m absolute -p none".format(mode,mode)
if out_file == "j9es06.out": # acs_10595_06
# settings for testing ~/Documents/HLAtransition/runhlaprocessing_testing/acs_10595_06_flag_testing/
mode = "sex"
all_drizzled_filelist = ["hst_10595_06_acs_wfc_f435w_drz.fits"]
working_hla_red = os.getcwd()
filter_sorted_flt_dict = {"f435w": ["j9es06rbq_flc.fits", "j9es06rcq_flc.fits", "j9es06req_flc.fits", "j9es06rgq_flc.fits"]}
param_dict = total_list[0].fdp_list[0].configobj_pars.as_single_giant_dict()
param_dict['quality control']['ci filter']['sourcex_bthresh'] = 5.0 #force it to use the value from HLA classic
param_dict['quality control']['ci filter']['dao_bthresh'] = 5.0 # force it to use the value from HLA classic
readnoise_dictionary_drzs = {"hst_10595_06_acs_wfc_f435w_drz.fits": 5.247499925}
scale_dict_drzs = {"hst_10595_06_acs_wfc_f435w_drz.fits": 0.05}
zero_point_AB_dict = {"hst_10595_06_acs_wfc_f435w_drz.fits": 25.6888167958}
exp_dictionary_scis = {"hst_10595_06_acs_wfc_f435w_drz.fits": 710.0}
detection_image = "hst_10595_06_acs_wfc_total_drz.fits"
dict_newTAB_matched2drz = {"hst_10595_06_acs_wfc_f435w_drz.fits": "hst_10595_06_acs_wfc_f435w_{}phot.txt".format(mode)}
phot_table_matched2cat = {all_drizzled_filelist[0]: Table.read(dict_newTAB_matched2drz[all_drizzled_filelist[0]], format='ascii')}
proc_type = "{}phot".format(mode)
drz_root_dir = os.getcwd()
rms_dict = {"hst_10595_06_acs_wfc_f435w_drz.fits": "hst_10595_06_acs_wfc_f435w_rms.fits"}
# os.remove("hst_10595_06_acs_wfc_f435w_msk.fits")
# from devutils import make_mask_file
# make_mask_file.make_mask_file("hst_10595_06_acs_wfc_f435w_wht.fits")
comp_cmd = "python /Users/dulude/Documents/Code/HLATransition/drizzlepac/drizzlepac/devutils/comparison_tools/compare_sourcelists.py orig_cats/hst_10595_06_acs_wfc_f435w_{}phot.txt hst_10595_06_acs_wfc_f435w_{}phot.txt -i hst_10595_06_acs_wfc_f435w_drz.fits hst_10595_06_acs_wfc_f435w_drz.fits -m absolute -p none".format(mode,mode)
# + + + + + + + + + + + + + + + + + + + + + + + + + + + +
# Execute hla_flag_filter.run_source_list_flaging
catalog_data = hla_flag_filter_HLAClassic.run_source_list_flaging(all_drizzled_filelist, filter_sorted_flt_dict,
param_dict, exp_dictionary_scis,
dict_newTAB_matched2drz, phot_table_matched2cat,
proc_type, drz_root_dir, debug = True)
catalog_name = dict_newTAB_matched2drz[all_drizzled_filelist[0]]
catalog_data.write(catalog_name, delimiter=",",format='ascii',overwrite=True)
print("Wrote {}".format(catalog_name))
try:
os.system(comp_cmd)
except:
print("skipping automatic comparision run")
if __name__ == "__main__":
run_hla_flag_filter_HLAClassic()
| 61.79703
| 340
| 0.667868
| 1,696
| 12,483
| 4.496462
| 0.126179
| 0.035405
| 0.03016
| 0.039208
| 0.906504
| 0.8968
| 0.891555
| 0.872804
| 0.835038
| 0.797797
| 0
| 0.075629
| 0.219338
| 12,483
| 201
| 341
| 62.104478
| 0.706927
| 0.232076
| 0
| 0.636364
| 0
| 0.030303
| 0.314895
| 0.197229
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015152
| false
| 0
| 0.083333
| 0
| 0.098485
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7f20ed98a090dda844e5340489d6c208513276d2
| 226
|
py
|
Python
|
components/studio/deployments/admin.py
|
ScilifelabDataCentre/stackn
|
00a65a16ff271f04548b3ff475c72dacbfd916df
|
[
"Apache-2.0"
] | null | null | null |
components/studio/deployments/admin.py
|
ScilifelabDataCentre/stackn
|
00a65a16ff271f04548b3ff475c72dacbfd916df
|
[
"Apache-2.0"
] | null | null | null |
components/studio/deployments/admin.py
|
ScilifelabDataCentre/stackn
|
00a65a16ff271f04548b3ff475c72dacbfd916df
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import DeploymentDefinition, DeploymentInstance, HelmResource
admin.site.register(HelmResource)
admin.site.register(DeploymentDefinition)
admin.site.register(DeploymentInstance)
| 28.25
| 74
| 0.862832
| 23
| 226
| 8.478261
| 0.478261
| 0.138462
| 0.261538
| 0.297436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066372
| 226
| 7
| 75
| 32.285714
| 0.924171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
615cff61d92d443e22f1204db917f9dba1c6f6b4
| 10,987
|
py
|
Python
|
tools/run_tests/xds_k8s_test_driver/tests/url_map/metadata_filter_test.py
|
minerba/grpc
|
775362a2cea21363339d73215e3b9a1394ad55b2
|
[
"Apache-2.0"
] | null | null | null |
tools/run_tests/xds_k8s_test_driver/tests/url_map/metadata_filter_test.py
|
minerba/grpc
|
775362a2cea21363339d73215e3b9a1394ad55b2
|
[
"Apache-2.0"
] | null | null | null |
tools/run_tests/xds_k8s_test_driver/tests/url_map/metadata_filter_test.py
|
minerba/grpc
|
775362a2cea21363339d73215e3b9a1394ad55b2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from typing import Tuple
from absl import flags
from absl.testing import absltest
from framework import xds_url_map_testcase
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
_NUM_RPCS = 150
_TEST_METADATA_KEY = 'xds_md'
_TEST_METADATA_VALUE_EMPTY = 'empty_ytpme'
_TEST_METADATA = ((RpcTypeEmptyCall, _TEST_METADATA_KEY,
_TEST_METADATA_VALUE_EMPTY),)
match_labels = [{
'name': 'TRAFFICDIRECTOR_NETWORK_NAME',
'value': 'default-vpc'
}]
not_match_labels = [{'name': 'fake', 'value': 'fail'}]
class TestMetadataFilterMatchAll(xds_url_map_testcase.XdsUrlMapTestCase):
"""" The test url-map has two routeRules: the higher priority routes to
the default backends, but is supposed to be filtered out by TD because
of non-matching metadata filters. The lower priority routes to alternative
backends and metadata filter matches. Thus, it verifies that TD evaluates
metadata filters correctly."""
@staticmethod
def url_map_change(
host_rule: HostRule,
path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
path_matcher["routeRules"] = [{
'priority': 0,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ALL',
'filterLabels': not_match_labels
}]
}],
'service': GcpResourceManager().default_backend_service()
}, {
'priority': 1,
'matchRules': [{
'prefixMatch':
'/grpc.testing.TestService/Empty',
'headerMatches': [{
'headerName': _TEST_METADATA_KEY,
'exactMatch': _TEST_METADATA_VALUE_EMPTY
}],
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ALL',
'filterLabels': match_labels
}]
}],
'service': GcpResourceManager().alternative_backend_service()
}]
return host_rule, path_matcher
def xds_config_validate(self, xds_config: DumpedXdsConfig):
self.assertNumEndpoints(xds_config, 2)
self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 2)
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][0]['match']['prefix'],
"/grpc.testing.TestService/Empty")
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
[0]['name'], _TEST_METADATA_KEY)
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
[0]['exactMatch'], _TEST_METADATA_VALUE_EMPTY)
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
"")
def rpc_distribution_validate(self, test_client: XdsTestClient):
rpc_distribution = self.configure_and_send(test_client,
rpc_types=[RpcTypeEmptyCall],
metadata=_TEST_METADATA,
num_rpcs=_NUM_RPCS)
self.assertEqual(
_NUM_RPCS,
rpc_distribution.empty_call_alternative_service_rpc_count)
class TestMetadataFilterMatchAny(xds_url_map_testcase.XdsUrlMapTestCase):
@staticmethod
def url_map_change(
host_rule: HostRule,
path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
path_matcher["routeRules"] = [{
'priority': 0,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ANY',
'filterLabels': not_match_labels
}]
}],
'service': GcpResourceManager().default_backend_service()
}, {
'priority': 1,
'matchRules': [{
'prefixMatch':
'/grpc.testing.TestService/Unary',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ANY',
'filterLabels': not_match_labels + match_labels
}]
}],
'service': GcpResourceManager().alternative_backend_service()
}]
return host_rule, path_matcher
def xds_config_validate(self, xds_config: DumpedXdsConfig):
self.assertNumEndpoints(xds_config, 2)
self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 2)
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][0]['match']['prefix'],
"/grpc.testing.TestService/Unary")
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
"")
def rpc_distribution_validate(self, test_client: XdsTestClient):
rpc_distribution = self.configure_and_send(test_client,
rpc_types=[RpcTypeUnaryCall],
num_rpcs=_NUM_RPCS)
self.assertEqual(
_NUM_RPCS,
rpc_distribution.unary_call_alternative_service_rpc_count)
class TestMetadataFilterMatchAnyAndAll(xds_url_map_testcase.XdsUrlMapTestCase):
@staticmethod
def url_map_change(
host_rule: HostRule,
path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
path_matcher["routeRules"] = [{
'priority': 0,
'matchRules': [{
'prefixMatch':
'/',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ALL',
'filterLabels': not_match_labels + match_labels
}]
}],
'service': GcpResourceManager().default_backend_service()
}, {
'priority': 1,
'matchRules': [{
'prefixMatch':
'/grpc.testing.TestService/Unary',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ANY',
'filterLabels': not_match_labels + match_labels
}]
}],
'service': GcpResourceManager().alternative_backend_service()
}]
return host_rule, path_matcher
def xds_config_validate(self, xds_config: DumpedXdsConfig):
self.assertNumEndpoints(xds_config, 2)
self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 2)
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][0]['match']['prefix'],
"/grpc.testing.TestService/Unary")
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
"")
def rpc_distribution_validate(self, test_client: XdsTestClient):
rpc_distribution = self.configure_and_send(test_client,
rpc_types=[RpcTypeUnaryCall],
num_rpcs=_NUM_RPCS)
self.assertEqual(
_NUM_RPCS,
rpc_distribution.unary_call_alternative_service_rpc_count)
class TestMetadataFilterMatchMultipleRules(
xds_url_map_testcase.XdsUrlMapTestCase):
@staticmethod
def url_map_change(
host_rule: HostRule,
path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
path_matcher["routeRules"] = [{
'priority': 0,
'matchRules': [{
'prefixMatch':
'/',
'headerMatches': [{
'headerName': _TEST_METADATA_KEY,
'exactMatch': _TEST_METADATA_VALUE_EMPTY
}],
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ANY',
'filterLabels': match_labels
}]
}],
'service': GcpResourceManager().alternative_backend_service()
}, {
'priority': 1,
'matchRules': [{
'prefixMatch':
'/grpc.testing.TestService/Unary',
'metadataFilters': [{
'filterMatchCriteria': 'MATCH_ALL',
'filterLabels': match_labels
}]
}],
'service': GcpResourceManager().default_backend_service()
}]
return host_rule, path_matcher
def xds_config_validate(self, xds_config: DumpedXdsConfig):
self.assertNumEndpoints(xds_config, 2)
self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 3)
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
[0]['name'], _TEST_METADATA_KEY)
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
[0]['exactMatch'], _TEST_METADATA_VALUE_EMPTY)
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
"/grpc.testing.TestService/Unary")
self.assertEqual(
xds_config.rds['virtualHosts'][0]['routes'][2]['match']['prefix'],
"")
def rpc_distribution_validate(self, test_client: XdsTestClient):
rpc_distribution = self.configure_and_send(test_client,
rpc_types=[RpcTypeEmptyCall],
metadata=_TEST_METADATA,
num_rpcs=_NUM_RPCS)
self.assertEqual(
_NUM_RPCS,
rpc_distribution.empty_call_alternative_service_rpc_count)
if __name__ == '__main__':
absltest.main()
| 39.521583
| 80
| 0.576408
| 971
| 10,987
| 6.226571
| 0.192585
| 0.04168
| 0.031757
| 0.063513
| 0.732881
| 0.726762
| 0.725935
| 0.724611
| 0.714357
| 0.702613
| 0
| 0.007852
| 0.316101
| 10,987
| 277
| 81
| 39.66426
| 0.796779
| 0.080459
| 0
| 0.844156
| 0
| 0
| 0.1617
| 0.027414
| 0
| 0
| 0
| 0
| 0.103896
| 1
| 0.051948
| false
| 0
| 0.030303
| 0
| 0.116883
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
617125f168844e031dc3dc197fac38fb76b23ec5
| 16,175
|
py
|
Python
|
test/api/drawing/test_drawing_objects.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
test/api/drawing/test_drawing_objects.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
test/api/drawing/test_drawing_objects.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="test_drawing_objects.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import os
import dateutil.parser
import asposewordscloud.models.requests
from test.base_test_context import BaseTestContext
#
# Example of how to get drawing objects.
#
class TestDrawingObjects(BaseTestContext):
#
# Test for getting drawing objects from document.
#
def test_get_document_drawing_objects(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestGetDocumentDrawingObjects.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectsRequest(name=remoteFileName, node_path='sections/0', folder=remoteDataFolder)
result = self.words_api.get_document_drawing_objects(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_objects, 'Validate GetDocumentDrawingObjects response')
self.assertIsNotNone(result.drawing_objects.list, 'Validate GetDocumentDrawingObjects response')
self.assertEqual(1, len(result.drawing_objects.list))
#
# Test for getting drawing objects from document without node path.
#
def test_get_document_drawing_objects_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestGetDocumentDrawingObjectsWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectsRequest(name=remoteFileName, folder=remoteDataFolder)
result = self.words_api.get_document_drawing_objects(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_objects, 'Validate GetDocumentDrawingObjectsWithoutNodePath response')
self.assertIsNotNone(result.drawing_objects.list, 'Validate GetDocumentDrawingObjectsWithoutNodePath response')
self.assertEqual(1, len(result.drawing_objects.list))
#
# Test for getting drawing object by specified index.
#
def test_get_document_drawing_object_by_index(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestGetDocumentDrawingObjectByIndex.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectByIndexRequest(name=remoteFileName, index=0, node_path='sections/0', folder=remoteDataFolder)
result = self.words_api.get_document_drawing_object_by_index(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_object, 'Validate GetDocumentDrawingObjectByIndex response')
self.assertEqual(300.0, result.drawing_object.height)
#
# Test for getting drawing object by specified index without node path.
#
def test_get_document_drawing_object_by_index_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestGetDocumentDrawingObjectByIndexWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectByIndexRequest(name=remoteFileName, index=0, folder=remoteDataFolder)
result = self.words_api.get_document_drawing_object_by_index(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_object, 'Validate GetDocumentDrawingObjectByIndexWithoutNodePath response')
self.assertEqual(300.0, result.drawing_object.height)
#
# Test for getting drawing object by specified index and format.
#
def test_render_drawing_object(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestGetDocumentDrawingObjectByIndexWithFormat.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.RenderDrawingObjectRequest(name=remoteFileName, format='png', index=0, node_path='sections/0', folder=remoteDataFolder)
result = self.words_api.render_drawing_object(request)
self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for getting drawing object by specified index and format without node path.
#
def test_render_drawing_object_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestGetDocumentDrawingObjectByIndexWithFormatWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.RenderDrawingObjectRequest(name=remoteFileName, format='png', index=0, folder=remoteDataFolder)
result = self.words_api.render_drawing_object(request)
self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for reading drawing object's image data.
#
def test_get_document_drawing_object_image_data(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestGetDocumentDrawingObjectImageData.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectImageDataRequest(name=remoteFileName, index=0, node_path='sections/0', folder=remoteDataFolder)
result = self.words_api.get_document_drawing_object_image_data(request)
self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for reading drawing object's image data without node path.
#
def test_get_document_drawing_object_image_data_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestGetDocumentDrawingObjectImageDataWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectImageDataRequest(name=remoteFileName, index=0, folder=remoteDataFolder)
result = self.words_api.get_document_drawing_object_image_data(request)
self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for getting drawing object OLE data.
#
def test_get_document_drawing_object_ole_data(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localDrawingFile = 'DocumentElements/DrawingObjects/sample_EmbeddedOLE.docx'
remoteFileName = 'TestGetDocumentDrawingObjectOleData.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localDrawingFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectOleDataRequest(name=remoteFileName, index=0, node_path='sections/0', folder=remoteDataFolder)
result = self.words_api.get_document_drawing_object_ole_data(request)
self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for getting drawing object OLE data without node path.
#
def test_get_document_drawing_object_ole_data_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localDrawingFile = 'DocumentElements/DrawingObjects/sample_EmbeddedOLE.docx'
remoteFileName = 'TestGetDocumentDrawingObjectOleDataWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localDrawingFile), 'rb'))
request = asposewordscloud.models.requests.GetDocumentDrawingObjectOleDataRequest(name=remoteFileName, index=0, folder=remoteDataFolder)
result = self.words_api.get_document_drawing_object_ole_data(request)
self.assertIsNotNone(result, 'Error has occurred.')
#
# Test for adding drawing object.
#
def test_insert_drawing_object(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestInsetDrawingObject.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
requestDrawingObject = asposewordscloud.DrawingObjectInsert(height=0.0, left=0.0, top=0.0, width=0.0, relative_horizontal_position='Margin', relative_vertical_position='Margin', wrap_type='Inline')
request = asposewordscloud.models.requests.InsertDrawingObjectRequest(name=remoteFileName, drawing_object=requestDrawingObject, image_file=open(os.path.join(self.local_test_folder, 'Common/aspose-cloud.png'), 'rb'), node_path='', folder=remoteDataFolder)
result = self.words_api.insert_drawing_object(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_object, 'Validate InsertDrawingObject response')
self.assertEqual('0.3.7.1', result.drawing_object.node_id)
#
# Test for adding drawing object without node path.
#
def test_insert_drawing_object_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestInsetDrawingObjectWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
requestDrawingObject = asposewordscloud.DrawingObjectInsert(height=0.0, left=0.0, top=0.0, width=0.0, relative_horizontal_position='Margin', relative_vertical_position='Margin', wrap_type='Inline')
request = asposewordscloud.models.requests.InsertDrawingObjectRequest(name=remoteFileName, drawing_object=requestDrawingObject, image_file=open(os.path.join(self.local_test_folder, 'Common/aspose-cloud.png'), 'rb'), folder=remoteDataFolder)
result = self.words_api.insert_drawing_object(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_object, 'Validate InsertDrawingObjectWithoutNodePath response')
self.assertEqual('0.3.7.1', result.drawing_object.node_id)
#
# Test for deleting drawing object.
#
def test_delete_drawing_object(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestDeleteDrawingObject.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.DeleteDrawingObjectRequest(name=remoteFileName, index=0, node_path='', folder=remoteDataFolder)
self.words_api.delete_drawing_object(request)
#
# Test for deleting drawing object without node path.
#
def test_delete_drawing_object_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestDeleteDrawingObjectWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.DeleteDrawingObjectRequest(name=remoteFileName, index=0, folder=remoteDataFolder)
self.words_api.delete_drawing_object(request)
#
# Test for updating drawing object.
#
def test_update_drawing_object(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestUpdateDrawingObject.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
requestDrawingObject = asposewordscloud.DrawingObjectUpdate(left=1.0)
request = asposewordscloud.models.requests.UpdateDrawingObjectRequest(name=remoteFileName, drawing_object=requestDrawingObject, image_file=open(os.path.join(self.local_test_folder, 'Common/aspose-cloud.png'), 'rb'), index=0, node_path='', folder=remoteDataFolder)
result = self.words_api.update_drawing_object(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_object, 'Validate UpdateDrawingObject response')
self.assertEqual(1.0, result.drawing_object.left)
#
# Test for updating drawing object without node path.
#
def test_update_drawing_object_without_node_path(self):
remoteDataFolder = self.remote_test_folder + '/DocumentElements/DrawingObjectss'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestUpdateDrawingObjectWithoutNodePath.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
requestDrawingObject = asposewordscloud.DrawingObjectUpdate(left=1.0)
request = asposewordscloud.models.requests.UpdateDrawingObjectRequest(name=remoteFileName, drawing_object=requestDrawingObject, image_file=open(os.path.join(self.local_test_folder, 'Common/aspose-cloud.png'), 'rb'), index=0, folder=remoteDataFolder)
result = self.words_api.update_drawing_object(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.drawing_object, 'Validate UpdateDrawingObjectWithoutNodePath response')
self.assertEqual(1.0, result.drawing_object.left)
| 53.559603
| 271
| 0.750108
| 1,693
| 16,175
| 6.972829
| 0.135263
| 0.063871
| 0.050826
| 0.023719
| 0.827531
| 0.811605
| 0.807624
| 0.793731
| 0.766709
| 0.754003
| 0
| 0.004581
| 0.149799
| 16,175
| 301
| 272
| 53.737542
| 0.853839
| 0.137682
| 0
| 0.588235
| 0
| 0
| 0.197592
| 0.151024
| 0
| 0
| 0
| 0
| 0.20915
| 1
| 0.104575
| false
| 0
| 0.026144
| 0
| 0.137255
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4eef1320c0e0691a8298a782442f1c3ab4a42e10
| 40
|
py
|
Python
|
testing/examples/import_error.py
|
dry-python/dependencies
|
1a8bba41ab42d0b5249b36471f5300d9faba81e7
|
[
"BSD-2-Clause"
] | 175
|
2018-07-21T13:04:44.000Z
|
2020-05-27T15:31:06.000Z
|
tests/helpers/examples/import_error.py
|
proofit404/dependencies
|
204e0cfadca801d64857f24aa4c74e7939ed9af0
|
[
"BSD-2-Clause"
] | 325
|
2016-05-16T11:16:11.000Z
|
2022-03-04T00:45:57.000Z
|
testing/examples/import_error.py
|
dry-python/dependencies
|
1a8bba41ab42d0b5249b36471f5300d9faba81e7
|
[
"BSD-2-Clause"
] | 18
|
2018-06-17T09:33:16.000Z
|
2020-05-20T18:12:30.000Z
|
from astral import Vision # noqa: F401
| 20
| 39
| 0.75
| 6
| 40
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 0.2
| 40
| 1
| 40
| 40
| 0.84375
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9c86a8fde028ad53edc3558eed458f5bce3f030f
| 2,214
|
py
|
Python
|
test/test_oximachine.py
|
ltalirz/oximachinerunner
|
ca8092a8b247216cb98b7d308862dba184e27f1e
|
[
"MIT"
] | null | null | null |
test/test_oximachine.py
|
ltalirz/oximachinerunner
|
ca8092a8b247216cb98b7d308862dba184e27f1e
|
[
"MIT"
] | null | null | null |
test/test_oximachine.py
|
ltalirz/oximachinerunner
|
ca8092a8b247216cb98b7d308862dba184e27f1e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint:disable=missing-module-docstring, missing-function-docstring
import os
from oximachinerunner import OximachineRunner
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
def test_oximachine():
runner = OximachineRunner()
output = runner.run_oximachine(
os.path.join(THIS_DIR, "..", "oximachinerunner/assets/ACODAA.cif")
)
assert len(output) == 5
assert output["prediction"] == [2, 2]
assert output["metal_indices"] == [0, 1]
assert output["metal_symbols"] == ["Fe", "Fe"]
output = runner.run_oximachine(os.path.join(THIS_DIR, "..", "examples/guvzee.cif"))
assert output["prediction"] == [
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
]
output = runner.run_oximachine(
os.path.join(THIS_DIR, "..", "examples/GUVZII_clean.cif")
)
assert output["prediction"] == [
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
]
output = runner.run_oximachine(
os.path.join(THIS_DIR, "..", "examples/IDIWOH_clean.cif")
)
assert output["prediction"] == [4, 4, 4, 4]
output = runner.run_oximachine(
os.path.join(THIS_DIR, "..", "examples/IDIWIB_clean.cif")
)
assert output["prediction"] == [3, 3, 3, 3]
# testing the MOF model
runner = OximachineRunner(modelname="mof")
output = runner.run_oximachine(
os.path.join(THIS_DIR, "..", "examples/IDIWIB_clean.cif")
)
assert output["prediction"] == [3, 3, 3, 3]
output = runner.run_oximachine(
os.path.join(THIS_DIR, "..", "examples/IDIWOH_clean.cif")
)
assert output["prediction"] == [4, 4, 4, 4]
output = runner.run_oximachine(
os.path.join(THIS_DIR, "..", "oximachinerunner/assets/ACODAA.cif")
)
assert len(output) == 5
assert output["prediction"] == [2, 2]
assert output["metal_indices"] == [0, 1]
assert output["metal_symbols"] == ["Fe", "Fe"]
| 23.305263
| 87
| 0.531165
| 254
| 2,214
| 4.507874
| 0.208661
| 0.040175
| 0.052402
| 0.059389
| 0.772926
| 0.746725
| 0.746725
| 0.746725
| 0.724891
| 0.724891
| 0
| 0.041257
| 0.310298
| 2,214
| 94
| 88
| 23.553191
| 0.708579
| 0.050136
| 0
| 0.7875
| 0
| 0
| 0.176751
| 0.091949
| 0
| 0
| 0
| 0
| 0.175
| 1
| 0.0125
| false
| 0
| 0.025
| 0
| 0.0375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9cde9f8cf5534efc1b5cec7d28cf00865e25ee25
| 113
|
py
|
Python
|
fpakman/core/resource.py
|
vinifmor/fpakman
|
a719991b8f7ecf366d44fdf074f5950767bdf121
|
[
"Zlib"
] | 39
|
2019-06-15T08:27:12.000Z
|
2021-11-08T03:33:01.000Z
|
fpakman/core/resource.py
|
vinifmor/fpakman
|
a719991b8f7ecf366d44fdf074f5950767bdf121
|
[
"Zlib"
] | 10
|
2019-06-16T12:16:19.000Z
|
2020-06-21T18:49:05.000Z
|
fpakman/core/resource.py
|
vinifmor/fpakman
|
a719991b8f7ecf366d44fdf074f5950767bdf121
|
[
"Zlib"
] | 3
|
2019-08-01T12:38:46.000Z
|
2020-04-30T20:40:23.000Z
|
from fpakman import ROOT_DIR
def get_path(resource_path):
return ROOT_DIR + '/resources/' + resource_path
| 16.142857
| 51
| 0.752212
| 16
| 113
| 5
| 0.6875
| 0.175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168142
| 113
| 6
| 52
| 18.833333
| 0.851064
| 0
| 0
| 0
| 0
| 0
| 0.098214
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
9ceeadf5aa7e878adeab42926d0d1a41ef275e04
| 3,943
|
py
|
Python
|
SS-GCNs/gnns/gin_net.py
|
TAMU-VITA/SS-GCNs
|
644f8a5f3b507be6d59be02747be406fabd8b8f9
|
[
"MIT"
] | 1
|
2021-06-07T15:18:10.000Z
|
2021-06-07T15:18:10.000Z
|
SS-GCNs/gnns/gin_net.py
|
TAMU-VITA/SS-GCNs
|
644f8a5f3b507be6d59be02747be406fabd8b8f9
|
[
"MIT"
] | null | null | null |
SS-GCNs/gnns/gin_net.py
|
TAMU-VITA/SS-GCNs
|
644f8a5f3b507be6d59be02747be406fabd8b8f9
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
from gnns.gin_layer import GINLayer, ApplyNodeFunc, MLP
class GINNet(nn.Module):
def __init__(self, net_params):
super().__init__()
in_dim = net_params[0]
hidden_dim = net_params[1]
n_classes = net_params[2]
dropout = 0.5
self.n_layers = 2
n_mlp_layers = 1 # GIN
learn_eps = True # GIN
neighbor_aggr_type = 'mean' # GIN
graph_norm = False
batch_norm = False
residual = False
self.n_classes = n_classes
# List of MLPs
self.ginlayers = torch.nn.ModuleList()
for layer in range(self.n_layers):
if layer == 0:
mlp = MLP(n_mlp_layers, in_dim, hidden_dim, hidden_dim)
else:
mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, n_classes)
self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
dropout, graph_norm, batch_norm, residual, 0, learn_eps))
# Linear function for output of each layer
# which maps the output of different layers into a prediction score
self.linears_prediction = nn.Linear(hidden_dim, n_classes, bias=False)
def forward(self, g, h, snorm_n, snorm_e):
# list of hidden representation at each layer (including input)
hidden_rep = []
for i in range(self.n_layers):
h = self.ginlayers[i](g, h, snorm_n)
hidden_rep.append(h)
score_over_layer = (self.linears_prediction(hidden_rep[0]) + hidden_rep[1]) / 2
return score_over_layer
class GINNet_ss(nn.Module):
def __init__(self, net_params, num_par):
super().__init__()
in_dim = net_params[0]
hidden_dim = net_params[1]
n_classes = net_params[2]
dropout = 0.5
self.n_layers = 2
n_mlp_layers = 1 # GIN
learn_eps = True # GIN
neighbor_aggr_type = 'mean' # GIN
graph_norm = False
batch_norm = False
residual = False
self.n_classes = n_classes
# List of MLPs
self.ginlayers = torch.nn.ModuleList()
for layer in range(self.n_layers):
if layer == 0:
mlp = MLP(n_mlp_layers, in_dim, hidden_dim, hidden_dim)
else:
mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, n_classes)
self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
dropout, graph_norm, batch_norm, residual, 0, learn_eps))
# Linear function for output of each layer
# which maps the output of different layers into a prediction score
self.linears_prediction = nn.Linear(hidden_dim, n_classes, bias=False)
self.classifier_ss = nn.Linear(hidden_dim, num_par, bias=False)
def forward(self, g, h, snorm_n, snorm_e):
# list of hidden representation at each layer (including input)
hidden_rep = []
for i in range(self.n_layers):
h = self.ginlayers[i](g, h, snorm_n)
hidden_rep.append(h)
score_over_layer = (self.linears_prediction(hidden_rep[0]) + hidden_rep[1]) / 2
h_ss = self.classifier_ss(hidden_rep[0])
return score_over_layer, h_ss
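To see what score_over_layer computes without pulling in the gnns package, here is a self-contained sketch (PyTorch assumed available; the shapes are made up for illustration): the layer-0 hidden state is projected to class logits by the shared linear head, then averaged with the layer-1 output, which is already class-sized.

import torch
import torch.nn as nn

hidden_dim, n_classes, n_nodes = 16, 3, 5
h0 = torch.randn(n_nodes, hidden_dim)                # stand-in for hidden_rep[0]
h1 = torch.randn(n_nodes, n_classes)                 # stand-in for hidden_rep[1]
head = nn.Linear(hidden_dim, n_classes, bias=False)  # plays the role of linears_prediction
score_over_layer = (head(h0) + h1) / 2               # same arithmetic as forward()
print(score_over_layer.shape)                        # torch.Size([5, 3])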
| 33.700855
| 113
| 0.584834
| 505
| 3,943
| 4.310891
| 0.227723
| 0.053744
| 0.030317
| 0.022049
| 0.794212
| 0.794212
| 0.794212
| 0.768489
| 0.768489
| 0.768489
| 0
| 0.015595
| 0.333249
| 3,943
| 116
| 114
| 33.991379
| 0.812476
| 0.118438
| 0
| 0.8
| 0
| 0
| 0.002452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.085714
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9cef8673569c093f97353e11647a929d7f02a79c
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/clikit/api/command/command_collection.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/clikit/api/command/command_collection.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/clikit/api/command/command_collection.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/0e/03/a2/8516ce170f58c40a340c994a5cb76273f276d7ad1ea824422b51c9e45c
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.447917
| 0
| 96
| 1
| 96
| 96
| 0.447917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9cf644b1b80793072b365bad95258387b0ed2c8b
| 122
|
py
|
Python
|
cdc/src/__init__.py
|
ZebinKang/cdc
|
a32fe41892021d29a1d9c534728a92b67f9b6cea
|
[
"MIT"
] | null | null | null |
cdc/src/__init__.py
|
ZebinKang/cdc
|
a32fe41892021d29a1d9c534728a92b67f9b6cea
|
[
"MIT"
] | null | null | null |
cdc/src/__init__.py
|
ZebinKang/cdc
|
a32fe41892021d29a1d9c534728a92b67f9b6cea
|
[
"MIT"
] | null | null | null |
from NoteDeid import *
from NoteConceptParser import *
from Converter import *
from D2v import *
from MLPipeline import *
| 20.333333
| 31
| 0.795082
| 15
| 122
| 6.466667
| 0.466667
| 0.412371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009804
| 0.163934
| 122
| 5
| 32
| 24.4
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
146ce26ee142df10da663c661efd59cf5bef1b60
| 10,674
|
py
|
Python
|
tests/test_packages/test_skills/test_tac_negotiation/test_helpers.py
|
bryanchriswhite/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 126
|
2019-09-07T09:32:44.000Z
|
2022-03-29T14:28:41.000Z
|
tests/test_packages/test_skills/test_tac_negotiation/test_helpers.py
|
salman6049/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 1,814
|
2019-08-24T10:08:07.000Z
|
2022-03-31T14:28:36.000Z
|
tests/test_packages/test_skills/test_tac_negotiation/test_helpers.py
|
salman6049/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 46
|
2019-09-03T22:13:58.000Z
|
2022-03-22T01:25:16.000Z
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the helpers module of the tac negotiation."""
from pathlib import Path
from aea.helpers.search.models import (
Attribute,
Constraint,
ConstraintType,
DataModel,
Description,
)
from aea.test_tools.test_skill import BaseSkillTestCase
from packages.fetchai.skills.tac_negotiation.helpers import (
DEMAND_DATAMODEL_NAME,
SUPPLY_DATAMODEL_NAME,
_build_goods_datamodel,
build_goods_description,
build_goods_query,
)
from tests.conftest import ROOT_DIR
class TestHelpers(BaseSkillTestCase):
"""Test Helper module methods of tac control."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "tac_negotiation")
@classmethod
def setup(cls):
"""Setup the test class."""
super().setup()
def test_build_goods_datamodel_supply(self):
"""Test the _build_goods_datamodel of Helpers module for a supply."""
good_ids = ["1", "2"]
is_supply = True
attributes = [
Attribute("1", int, True, "A good on offer."),
Attribute("2", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
actual_data_model = _build_goods_datamodel(good_ids, is_supply)
assert actual_data_model == expected_data_model
def test_build_goods_datamodel_demand(self):
"""Test the _build_goods_datamodel of Helpers module for a demand."""
good_ids = ["1", "2"]
is_supply = False
attributes = [
Attribute("1", int, True, "A good on offer."),
Attribute("2", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(DEMAND_DATAMODEL_NAME, attributes)
actual_data_model = _build_goods_datamodel(good_ids, is_supply)
assert actual_data_model == expected_data_model
def test_build_goods_description_supply(self):
"""Test the build_goods_description of Helpers module for supply."""
quantities_by_good_id = {"2": 5, "3": 10}
currency_id = "1"
ledger_id = "some_ledger_id"
is_supply = True
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("3", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
expected_values = {"currency_id": currency_id, "ledger_id": ledger_id}
expected_values.update(quantities_by_good_id)
expected_description = Description(expected_values, expected_data_model)
actual_description = build_goods_description(
quantities_by_good_id, currency_id, ledger_id, is_supply
)
assert actual_description == expected_description
def test_build_goods_description_demand(self):
"""Test the build_goods_description of Helpers module for demand (same as above)."""
quantities_by_good_id = {"2": 5, "3": 10}
currency_id = "1"
ledger_id = "some_ledger_id"
is_supply = False
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("3", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(DEMAND_DATAMODEL_NAME, attributes)
expected_values = {"currency_id": currency_id, "ledger_id": ledger_id}
expected_values.update(quantities_by_good_id)
expected_description = Description(expected_values, expected_data_model)
actual_description = build_goods_description(
quantities_by_good_id, currency_id, ledger_id, is_supply
)
assert actual_description == expected_description
def test_build_goods_query(self):
"""Test the build_goods_query of Helpers module."""
good_ids = ["2", "3"]
currency_id = "1"
ledger_id = "some_ledger_id"
is_searching_for_sellers = True
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("3", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
expected_constraints = [
Constraint("2", ConstraintType(">=", 1)),
Constraint("3", ConstraintType(">=", 1)),
Constraint("ledger_id", ConstraintType("==", ledger_id)),
Constraint("currency_id", ConstraintType("==", currency_id)),
]
actual_query = build_goods_query(
good_ids, currency_id, ledger_id, is_searching_for_sellers
)
constraints = [
(c.constraint_type.type, c.constraint_type.value)
for c in actual_query.constraints[0].constraints
]
for constraint in expected_constraints:
assert (
constraint.constraint_type.type,
constraint.constraint_type.value,
) in constraints
assert actual_query.model == expected_data_model
def test_build_goods_query_1_good(self):
"""Test the build_goods_query of Helpers module where there is 1 good."""
good_ids = ["2"]
currency_id = "1"
ledger_id = "some_ledger_id"
is_searching_for_sellers = True
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
expected_constraints = [
Constraint("2", ConstraintType(">=", 1)),
Constraint("ledger_id", ConstraintType("==", ledger_id)),
Constraint("currency_id", ConstraintType("==", currency_id)),
]
actual_query = build_goods_query(
good_ids, currency_id, ledger_id, is_searching_for_sellers
)
for constraint in expected_constraints:
assert constraint in actual_query.constraints
assert actual_query.model == expected_data_model
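The query tests above compare constraints by their (type, value) pairs rather than by object identity. A minimal standalone illustration of that pattern, using namedtuple stand-ins (hypothetical; not the aea classes):

from collections import namedtuple

ConstraintType = namedtuple('ConstraintType', 'type value')
Constraint = namedtuple('Constraint', 'attribute_name constraint_type')

expected = [Constraint('2', ConstraintType('>=', 1))]
actual = [Constraint('2', ConstraintType('>=', 1)),
          Constraint('ledger_id', ConstraintType('==', 'some_ledger_id'))]

# project each constraint onto a comparable (type, value) tuple
actual_pairs = [(c.constraint_type.type, c.constraint_type.value) for c in actual]
for c in expected:
    assert (c.constraint_type.type, c.constraint_type.value) in actual_pairs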
| 37.985765
| 92
| 0.570639
| 1,126
| 10,674
| 5.195382
| 0.14032
| 0.035556
| 0.018462
| 0.024615
| 0.798291
| 0.772479
| 0.762564
| 0.74547
| 0.72547
| 0.711453
| 0
| 0.00748
| 0.323684
| 10,674
| 280
| 93
| 38.121429
| 0.802881
| 0.120386
| 0
| 0.770925
| 0
| 0
| 0.205983
| 0
| 0
| 0
| 0
| 0
| 0.035242
| 1
| 0.030837
| false
| 0
| 0.022026
| 0
| 0.061674
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
14b757978821b5341ed6a4a277fcfd2e75bc9742
| 107
|
py
|
Python
|
egs/codeswitching/asr/local_yzl23/test_libsndfile.py
|
luyizhou4/espnet
|
a408b9372df3f57ef33b8a378a8d9abc7f872cf5
|
[
"Apache-2.0"
] | null | null | null |
egs/codeswitching/asr/local_yzl23/test_libsndfile.py
|
luyizhou4/espnet
|
a408b9372df3f57ef33b8a378a8d9abc7f872cf5
|
[
"Apache-2.0"
] | null | null | null |
egs/codeswitching/asr/local_yzl23/test_libsndfile.py
|
luyizhou4/espnet
|
a408b9372df3f57ef33b8a378a8d9abc7f872cf5
|
[
"Apache-2.0"
] | null | null | null |
from ctypes.util import find_library as _find_library
print(_find_library('sndfile'))
print('test fine')
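Worth noting: find_library only locates the shared object; loading it is a separate step. A small sketch, guarded so it runs whether or not libsndfile is installed:

from ctypes import CDLL
from ctypes.util import find_library

path = find_library('sndfile')   # returns None when the library is absent
if path is not None:
    libsndfile = CDLL(path)      # load the shared object for direct C calls
    print('loaded', path)
else:
    print('libsndfile not found')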
| 17.833333
| 53
| 0.794393
| 16
| 107
| 5
| 0.6875
| 0.4125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102804
| 107
| 5
| 54
| 21.4
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.149533
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
1ae51ac2c341ebe5300267cfbe20cb5e5c501fda
| 1,816
|
py
|
Python
|
tests/format_directory_test.py
|
garysb/dismantle
|
b2aeed5916f980c20852d99ae379b0dc1da5a135
|
[
"MIT"
] | 2
|
2021-06-02T12:37:13.000Z
|
2021-06-08T07:13:20.000Z
|
tests/format_directory_test.py
|
garysb/dismantle
|
b2aeed5916f980c20852d99ae379b0dc1da5a135
|
[
"MIT"
] | 5
|
2021-06-29T09:56:15.000Z
|
2021-07-12T09:41:19.000Z
|
tests/format_directory_test.py
|
area28technologies/dismantle
|
b2aeed5916f980c20852d99ae379b0dc1da5a135
|
[
"MIT"
] | 1
|
2021-12-12T06:17:27.000Z
|
2021-12-12T06:17:27.000Z
|
import os
from pathlib import Path
import pytest
from dismantle.package import DirectoryPackageFormat, PackageFormat
def test_inherits() -> None:
assert issubclass(DirectoryPackageFormat, PackageFormat) is True
def test_grasp_exists(datadir: Path) -> None:
src = datadir.join('directory_src')
assert DirectoryPackageFormat.grasps(src) is True
def test_grasp_non_existant(datadir: Path) -> None:
src = datadir.join('directory_non_existant')
assert DirectoryPackageFormat.grasps(src) is False
def test_grasp_not_supported(datadir: Path) -> None:
src = datadir.join('package.zip')
assert DirectoryPackageFormat.grasps(src) is False
def test_extract_not_supported(datadir: Path) -> None:
src = datadir.join('package.zip')
dest = datadir.join(f'{src}_output')
message = 'formatter only supports directories'
with pytest.raises(ValueError, match=message):
DirectoryPackageFormat.extract(src, dest)
def test_extract_non_existant(datadir: Path) -> None:
src = datadir.join('directory_non_existant')
dest = datadir.join(f'{src}_output')
message = 'formatter only supports directories'
with pytest.raises(ValueError, match=message):
DirectoryPackageFormat.extract(src, dest)
def test_extract_already_exists(datadir: Path) -> None:
src = datadir.join('directory_src')
dest = datadir.join('directory_exists')
DirectoryPackageFormat.extract(src, dest)
assert os.path.exists(dest) is True
assert os.path.exists(dest / 'package.json') is True
def test_extract_create(datadir: Path) -> None:
src = datadir.join('directory_src')
dest = datadir.join('directory_created')
DirectoryPackageFormat.extract(src, dest)
assert os.path.exists(dest) is True
assert os.path.exists(dest / 'package.json') is True
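The ValueError tests above lean on pytest.raises with a match pattern. A minimal standalone example of the idiom (the divide helper is hypothetical, not part of dismantle):

import pytest

def divide(a, b):
    if b == 0:
        raise ValueError('divisor must be non-zero')
    return a / b

def test_divide_by_zero():
    # match is a regular expression searched against the exception message
    with pytest.raises(ValueError, match='non-zero'):
        divide(1, 0)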
| 32.428571
| 68
| 0.740639
| 225
| 1,816
| 5.84
| 0.213333
| 0.092085
| 0.079909
| 0.09589
| 0.829528
| 0.773973
| 0.773973
| 0.773973
| 0.696347
| 0.656012
| 0
| 0
| 0.154185
| 1,816
| 55
| 69
| 33.018182
| 0.855469
| 0
| 0
| 0.589744
| 0
| 0
| 0.140969
| 0.024229
| 0
| 0
| 0
| 0
| 0.205128
| 1
| 0.205128
| false
| 0
| 0.102564
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1afb4e419b6e7623430e399ba3b927cbbb015ac9
| 132
|
py
|
Python
|
api/companies/urls.py
|
anjaekk/CRM-internship-
|
94eab9401a7336ebbb11046a77c59b1d07e2bf68
|
[
"MIT"
] | 1
|
2021-09-10T09:11:08.000Z
|
2021-09-10T09:11:08.000Z
|
api/companies/urls.py
|
anjaekk/CRM-site-project
|
94eab9401a7336ebbb11046a77c59b1d07e2bf68
|
[
"MIT"
] | null | null | null |
api/companies/urls.py
|
anjaekk/CRM-site-project
|
94eab9401a7336ebbb11046a77c59b1d07e2bf68
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from .views import CompanyAPIView
# urlpatterns = [
# path("",include(router.urls)),
# ]
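For completeness, a sketch of how the commented-out urlpatterns could be wired to the imported view; the empty route and the name are assumptions, not taken from the repository, and this would live in the same urls.py:

from django.urls import path
from .views import CompanyAPIView

urlpatterns = [
    path('', CompanyAPIView.as_view(), name='company-list'),  # route name is an assumption
]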
| 18.857143
| 37
| 0.69697
| 15
| 132
| 6.133333
| 0.666667
| 0.23913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 132
| 7
| 38
| 18.857143
| 0.836364
| 0.393939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0d52e3e144e777e66888716d6fd11de6d57fc9e0
| 11,717
|
py
|
Python
|
tests/test_geometric_tests.py
|
mxrie-eve/Pyrr
|
34802ba0393a6e7752cf55fadecd0d7824042dc0
|
[
"Unlicense"
] | null | null | null |
tests/test_geometric_tests.py
|
mxrie-eve/Pyrr
|
34802ba0393a6e7752cf55fadecd0d7824042dc0
|
[
"Unlicense"
] | null | null | null |
tests/test_geometric_tests.py
|
mxrie-eve/Pyrr
|
34802ba0393a6e7752cf55fadecd0d7824042dc0
|
[
"Unlicense"
] | null | null | null |
from pyrr.geometric_tests import ray_intersect_sphere
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
from pyrr import geometric_tests as gt
from pyrr import line, plane, ray, sphere
class test_geometric_tests(unittest.TestCase):
def test_import(self):
import pyrr
pyrr.geometric_tests
from pyrr import geometric_tests
def test_point_intersect_line(self):
p = np.array([1.,1.,1.])
l = np.array([[0.,0.,0.],[2.,2.,2.]])
result = gt.point_intersect_line(p, l)
self.assertTrue(np.array_equal(result, p))
def test_point_intersect_line_invalid(self):
p = np.array([3.,3.,3.])
l = np.array([[0.,0.,0.],[2.,2.,2.]])
result = gt.point_intersect_line(p, l)
self.assertTrue(np.array_equal(result, p))
def test_point_intersect_line_segment(self):
p = np.array([1.,1.,1.])
l = np.array([[0.,0.,0.],[2.,2.,2.]])
result = gt.point_intersect_line_segment(p, l)
self.assertTrue(np.array_equal(result, p))
def test_point_intersect_line_segment_invalid(self):
p = np.array([3.,3.,3.])
l = np.array([[0.,0.,0.],[2.,2.,2.]])
result = gt.point_intersect_line_segment(p, l)
self.assertEqual(result, None)
def test_point_intersect_rectangle_valid_intersections_1(self):
r = np.array([
[0.0, 0.0],
[5.0, 5.0]
])
p = [ 0.0, 0.0]
result = gt.point_intersect_rectangle(p, r)
self.assertTrue(np.array_equal(result, p))
def test_point_intersect_rectangle_valid_intersections_2(self):
r = np.array([
[0.0, 0.0],
[5.0, 5.0]
])
p = [ 5.0, 5.0]
result = gt.point_intersect_rectangle(p, r)
self.assertTrue(np.array_equal(result, p))
def test_point_intersect_rectangle_valid_intersections_3(self):
r = np.array([
[0.0, 0.0],
[5.0, 5.0]
])
p = [ 1.0, 1.0]
result = gt.point_intersect_rectangle(p, r)
self.assertTrue(np.array_equal(result, p))
def test_point_intersect_rectangle_invalid_intersections_1(self):
r = np.array([
[0.0, 0.0],
[5.0, 5.0]
])
p = [-1.0, 1.0]
result = gt.point_intersect_rectangle(p, r)
self.assertFalse(np.array_equal(result, p))
def test_point_intersect_rectangle_invalid_intersections_2(self):
r = np.array([
[0.0, 0.0],
[5.0, 5.0]
])
p = [ 1.0, 10.0]
result = gt.point_intersect_rectangle(p, r)
self.assertFalse(np.array_equal(result, p))
def test_point_intersect_rectangle_invalid_intersections_3(self):
rect = np.array([
[0.0, 0.0],
[5.0, 5.0]
])
point = [ 1.0,-1.0]
result = gt.point_intersect_rectangle(point, rect)
self.assertFalse(np.array_equal(result, point))
def test_ray_intersect_plane(self):
r = ray.create([0.,-1.,0.],[0.,1.,0.])
p = plane.create([0.,1.,0.], 0.)
result = gt.ray_intersect_plane(r, p)
self.assertFalse(np.array_equal(result, [0.,1.,0.]))
def test_ray_intersect_plane_front_only(self):
r = ray.create([0.,-1.,0.],[0.,1.,0.])
p = plane.create([0.,1.,0.], 0.)
result = gt.ray_intersect_plane(r, p, front_only=True)
self.assertEqual(result, None)
def test_ray_intersect_plane_invalid(self):
r = ray.create([0.,-1.,0.],[1.,0.,0.])
p = plane.create([0.,1.,0.], 0.)
result = gt.ray_intersect_plane(r, p)
self.assertEqual(result, None)
def test_point_closest_point_on_ray(self):
l = line.create_from_points(
[ 0.0, 0.0, 0.0 ],
[10.0, 0.0, 0.0 ]
)
p = np.array([ 0.0, 1.0, 0.0])
result = gt.point_closest_point_on_ray(p, l)
self.assertTrue(np.array_equal(result, [ 0.0, 0.0, 0.0]))
def test_point_closest_point_on_line(self):
p = np.array([0.,1.,0.])
l = np.array([[0.,0.,0.],[2.,0.,0.]])
result = gt.point_closest_point_on_line(p, l)
self.assertTrue(np.array_equal(result, [0.,0.,0.]), (result,))
def test_point_closest_point_on_line_2(self):
p = np.array([3.,0.,0.])
l = np.array([[0.,0.,0.],[2.,0.,0.]])
result = gt.point_closest_point_on_line(p, l)
self.assertTrue(np.array_equal(result, [3.,0.,0.]), (result,))
def test_point_closest_point_on_line_segment(self):
p = np.array([0.,1.,0.])
l = np.array([[0.,0.,0.],[2.,0.,0.]])
result = gt.point_closest_point_on_line_segment(p, l)
self.assertTrue(np.array_equal(result, [0.,0.,0.]), (result,))
def test_vector_parallel_vector(self):
v1 = np.array([1.,0.,0.])
v2 = np.array([2.,0.,0.])
self.assertTrue(gt.vector_parallel_vector(v1,v2))
def test_vector_parallel_vector_invalid(self):
v1 = np.array([1.,0.,0.])
v2 = np.array([0.,1.,0.])
self.assertFalse(gt.vector_parallel_vector(v1,v2))
def test_ray_parallel_ray(self):
r1 = ray.create([0.,0.,0.],[1.,0.,0.])
r2 = ray.create([1.,0.,0.],[2.,0.,0.])
self.assertTrue(gt.ray_parallel_ray(r1,r2))
def test_ray_parallel_ray_2(self):
r1 = ray.create([0.,0.,0.],[1.,0.,0.])
r2 = ray.create([1.,0.,0.],[0.,1.,0.])
self.assertFalse(gt.ray_parallel_ray(r1,r2))
def test_ray_parallel_ray_3(self):
r1 = ray.create([0.,0.,0.],[1.,0.,0.])
r2 = ray.create([0.,1.,0.],[1.,0.,0.])
self.assertTrue(gt.ray_parallel_ray(r1,r2))
def test_ray_coincident_ray(self):
r1 = ray.create([0.,0.,0.],[1.,0.,0.])
r2 = ray.create([1.,0.,0.],[2.,0.,0.])
self.assertTrue(gt.ray_coincident_ray(r1,r2))
def test_ray_coincident_ray_2(self):
r1 = ray.create([0.,0.,0.],[1.,0.,0.])
r2 = ray.create([1.,0.,0.],[0.,1.,0.])
self.assertFalse(gt.ray_coincident_ray(r1,r2))
def test_ray_coincident_ray_3(self):
r1 = ray.create([0.,0.,0.],[1.,0.,0.])
r2 = ray.create([0.,1.,0.],[1.,0.,0.])
self.assertFalse(gt.ray_coincident_ray(r1,r2))
def test_ray_intersect_aabb_valid_1(self):
a = np.array([[-1.0,-1.0,-1.0], [ 1.0, 1.0, 1.0]])
r = np.array([[ 0.5, 0.5, 0.0], [ 0.0, 0.0,-1.0]])
result = gt.ray_intersect_aabb(r, a)
self.assertTrue(np.array_equal(result, [ 0.5, 0.5,-1.0]))
def test_ray_intersect_aabb_valid_2(self):
a = np.array([[-1.0,-1.0,-1.0], [ 1.0, 1.0, 1.0]])
r = np.array([[2.0, 2.0, 2.0], [ -1.0, -1.0, -1.0]])
result = gt.ray_intersect_aabb(r, a)
self.assertTrue(np.array_equal(result, [1.0, 1.0, 1.0]))
def test_ray_intersect_aabb_valid_3(self):
a = np.array([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
r = np.array([[.5, .5, .5], [0, 0, 1.0]])
result = gt.ray_intersect_aabb(r, a)
self.assertTrue(np.array_equal(result, [.5, .5, 1.0]))
def test_ray_intersect_aabb_invalid_1(self):
a = np.array([[-1.0,-1.0,-1.0], [ 1.0, 1.0, 1.0]])
r = np.array([[2.0, 2.0, 2.0], [ 1.0, 1.0, 1.0]])
result = gt.ray_intersect_aabb(r, a)
self.assertEqual(result, None)
def test_point_height_above_plane(self):
pl = plane.create([0., 1., 0.], 1.)
p = np.array([0., 1., 0.])
result = gt.point_height_above_plane(p, pl)
self.assertEqual(result, 0.)
p = np.array([0., 0., 0.])
result = gt.point_height_above_plane(p, pl)
self.assertEqual(result, -1.)
v1 = np.array([ 0.0, 0.0, 1.0])
v2 = np.array([ 1.0, 0.0, 1.0])
v3 = np.array([ 0.0, 1.0, 1.0])
p = np.array([0.0, 0.0, 20.0])
pl = plane.create_from_points(v1, v2, v3)
pl = plane.invert_normal(pl)
result = gt.point_height_above_plane(p, pl)
self.assertEqual(result, 19.)
pl = plane.create_xz(distance=5.)
p = np.array([0., 5., 0.])
h = gt.point_height_above_plane(p, pl)
self.assertEqual(h, 0.)
def test_point_closest_point_on_plane(self):
pl = np.array([ 0.0, 1.0, 0.0, 0.0])
p = np.array([ 5.0, 20.0, 5.0])
result = gt.point_closest_point_on_plane(p, pl)
self.assertTrue(np.array_equal(result, [ 5.0, 0.0, 5.0]))
def test_sphere_does_intersect_sphere_1(self):
s1 = sphere.create()
s2 = sphere.create()
self.assertTrue(gt.sphere_does_intersect_sphere(s1, s2))
def test_sphere_does_intersect_sphere_2(self):
s1 = sphere.create()
s2 = sphere.create([1.,0.,0.])
self.assertTrue(gt.sphere_does_intersect_sphere(s1, s2))
def test_sphere_does_intersect_sphere_3(self):
s1 = sphere.create()
s2 = sphere.create([2.,0.,0.], 1.0)
self.assertTrue(gt.sphere_does_intersect_sphere(s1, s2))
def test_sphere_does_intersect_sphere_4(self):
s1 = sphere.create()
s2 = sphere.create([2.,0.,0.], 0.5)
self.assertFalse(gt.sphere_does_intersect_sphere(s1, s2))
def test_sphere_penetration_sphere_1(self):
s1 = sphere.create()
s2 = sphere.create()
self.assertEqual(gt.sphere_penetration_sphere(s1, s2), 2.0)
def test_sphere_penetration_sphere_2(self):
s1 = sphere.create()
s2 = sphere.create([1.,0.,0.], 1.0)
self.assertEqual(gt.sphere_penetration_sphere(s1, s2), 1.0)
def test_sphere_penetration_sphere_3(self):
s1 = sphere.create()
s2 = sphere.create([2.,0.,0.], 1.0)
self.assertEqual(gt.sphere_penetration_sphere(s1, s2), 0.0)
def test_sphere_penetration_sphere_4(self):
s1 = sphere.create()
s2 = sphere.create([3.,0.,0.], 1.0)
self.assertEqual(gt.sphere_penetration_sphere(s1, s2), 0.0)
def test_ray_intersect_sphere_no_solution_1(self):
r = ray.create([0, 2, 0], [1, 0, 0])
s = sphere.create([0, 0, 0], 1)
intersections = ray_intersect_sphere(r, s)
self.assertEqual(len(intersections), 0)
def test_ray_intersect_sphere_no_solution_2(self):
r = ray.create([0, 0, 0], [1, 0, 0])
s = sphere.create([0, 2, 0], 1)
intersections = ray_intersect_sphere(r, s)
self.assertEqual(len(intersections), 0)
def test_ray_intersect_sphere_one_solution_1(self):
r = ray.create([0, 0, 0], [1, 0, 0])
s = sphere.create([0, 0, 0], 1)
intersections = ray_intersect_sphere(r, s)
self.assertEqual(len(intersections), 1)
np.testing.assert_array_almost_equal(intersections[0], np.array([1, 0, 0]), decimal=2)
def test_ray_intersect_sphere_two_solutions_1(self):
r = ray.create([-2, 0, 0], [1, 0, 0])
s = sphere.create([0, 0, 0], 1)
intersections = ray_intersect_sphere(r, s)
self.assertEqual(len(intersections), 2)
np.testing.assert_array_almost_equal(intersections[0], np.array([1, 0, 0]), decimal=2)
np.testing.assert_array_almost_equal(intersections[1], np.array([-1, 0, 0]), decimal=2)
def test_ray_intersect_sphere_two_solutions_2(self):
r = ray.create([2.48, 1.45, 1.78], [-3.1, 0.48, -3.2])
s = sphere.create([1, 1, 0], 1)
intersections = ray_intersect_sphere(r, s)
self.assertEqual(len(intersections), 2)
np.testing.assert_array_almost_equal(intersections[0], np.array([0.44, 1.77, -0.32]), decimal=2)
np.testing.assert_array_almost_equal(intersections[1], np.array([1.41, 1.62, 0.67]), decimal=2)
if __name__ == '__main__':
unittest.main()
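The ray/sphere cases above all follow from substituting the ray p(t) = o + t*d into |p - c|^2 = r^2 and solving the resulting quadratic in t. A self-contained NumPy sketch of that derivation (illustrative, not pyrr's implementation):

import numpy as np

def ray_sphere(origin, direction, centre, radius):
    o, d, c = (np.asarray(v, dtype=float) for v in (origin, direction, centre))
    oc = o - c
    a = d @ d
    b = 2.0 * (d @ oc)
    k = oc @ oc - radius * radius
    disc = b * b - 4.0 * a * k
    if disc < 0:
        return []                              # ray misses the sphere
    t1 = (-b - np.sqrt(disc)) / (2.0 * a)
    t2 = (-b + np.sqrt(disc)) / (2.0 * a)
    return [o + t * d for t in sorted((t1, t2)) if t >= 0]  # hits in front of the origin

print(ray_sphere([-2, 0, 0], [1, 0, 0], [0, 0, 0], 1.0))  # [-1,0,0] and [1,0,0], matching the test above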
| 36.962145
| 104
| 0.581804
| 1,860
| 11,717
| 3.459677
| 0.054839
| 0.046309
| 0.034499
| 0.020513
| 0.911267
| 0.866511
| 0.818026
| 0.765035
| 0.714685
| 0.696659
| 0
| 0.078009
| 0.238542
| 11,717
| 316
| 105
| 37.079114
| 0.643241
| 0
| 0
| 0.503846
| 0
| 0
| 0.000683
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.173077
| false
| 0
| 0.034615
| 0
| 0.211538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b497aee10348953dd46616dc98824f2c3d70953e
| 1,042
|
py
|
Python
|
tests/lid_driven_cavity/test.py
|
nazmas/SNaC
|
e928adc142df5bbe1a7941907c35add6ea6f1ff0
|
[
"MIT"
] | null | null | null |
tests/lid_driven_cavity/test.py
|
nazmas/SNaC
|
e928adc142df5bbe1a7941907c35add6ea6f1ff0
|
[
"MIT"
] | null | null | null |
tests/lid_driven_cavity/test.py
|
nazmas/SNaC
|
e928adc142df5bbe1a7941907c35add6ea6f1ff0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
def test_ldc():
import numpy as np
import os
from read_single_field_binary import read_single_field_binary
data_ref = np.loadtxt("data_ldc_re1000.txt")
if "data_x" in os.getcwd():
data,xp,yp,zp,xu,yv,zw = read_single_field_binary("vey_fld_0001500.bin",np.array([1,1,1]))
islice = int(np.size(data[0,0,:])/2)
np.testing.assert_allclose(data[0,islice,:], data_ref[:,1], rtol=1e-7, atol=0)
if "data_y" in os.getcwd():
data,xp,yp,zp,xu,yv,zw = read_single_field_binary("vex_fld_0001500.bin",np.array([1,1,1]))
islice = int(np.size(data[0,0,:])/2)
np.testing.assert_allclose(data[islice,0,:], data_ref[:,1], rtol=1e-7, atol=0)
if "data_z" in os.getcwd():
data,xp,yp,zp,xu,yv,zw = read_single_field_binary("vex_fld_0001500.bin",np.array([1,1,1]))
islice = int(np.size(data[0,:,0])/2)
np.testing.assert_allclose(data[islice,:,0], data_ref[:,1], rtol=1e-7, atol=0)
if __name__ == "__main__":
test_ldc()
print("Passed!")
| 47.363636
| 98
| 0.643954
| 185
| 1,042
| 3.394595
| 0.302703
| 0.019108
| 0.119427
| 0.167197
| 0.718153
| 0.718153
| 0.718153
| 0.718153
| 0.718153
| 0.718153
| 0
| 0.066743
| 0.166027
| 1,042
| 21
| 99
| 49.619048
| 0.655926
| 0.019194
| 0
| 0.2
| 0
| 0
| 0.106758
| 0
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.05
| false
| 0.05
| 0.15
| 0
| 0.2
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b4e8ec3e073f72df115d2e467a43a5e057d8d890
| 35
|
py
|
Python
|
slack_bolt/response/__init__.py
|
korymath/bolt-python
|
67e0286d756ba92510315d044303f43b03380b52
|
[
"MIT"
] | 160
|
2019-09-27T18:02:03.000Z
|
2022-03-15T23:46:40.000Z
|
slack_bolt/response/__init__.py
|
korymath/bolt-python
|
67e0286d756ba92510315d044303f43b03380b52
|
[
"MIT"
] | 2
|
2019-10-21T13:30:17.000Z
|
2019-10-30T00:09:11.000Z
|
slack_bolt/response/__init__.py
|
korymath/bolt-python
|
67e0286d756ba92510315d044303f43b03380b52
|
[
"MIT"
] | 31
|
2019-10-19T18:10:23.000Z
|
2022-02-28T14:13:19.000Z
|
from .response import BoltResponse
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b4ed182369b3b188f8f312aa3ddca9ef3c96de04
| 36
|
py
|
Python
|
acousticsim/clustering/__init__.py
|
JoFrhwld/python-acoustic-similarity
|
50f71835532010b2fedf14b0ca3a52d88a9ab380
|
[
"MIT"
] | 5
|
2018-01-15T22:06:20.000Z
|
2022-02-21T07:02:40.000Z
|
acousticsim/clustering/__init__.py
|
JoFrhwld/python-acoustic-similarity
|
50f71835532010b2fedf14b0ca3a52d88a9ab380
|
[
"MIT"
] | null | null | null |
acousticsim/clustering/__init__.py
|
JoFrhwld/python-acoustic-similarity
|
50f71835532010b2fedf14b0ca3a52d88a9ab380
|
[
"MIT"
] | 2
|
2019-11-28T17:06:27.000Z
|
2019-12-05T22:57:28.000Z
|
from .network import ClusterNetwork
| 18
| 35
| 0.861111
| 4
| 36
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
37064e9b1f6c4c2026274a61dc624c50744caad0
| 45,476
|
py
|
Python
|
analysis/Mass Action/DP/22Apro.py
|
tee-lab/PercolationModels
|
687cb8189fafeb2e0d205ea4d8a660bd953bd7b1
|
[
"BSD-3-Clause"
] | null | null | null |
analysis/Mass Action/DP/22Apro.py
|
tee-lab/PercolationModels
|
687cb8189fafeb2e0d205ea4d8a660bd953bd7b1
|
[
"BSD-3-Clause"
] | null | null | null |
analysis/Mass Action/DP/22Apro.py
|
tee-lab/PercolationModels
|
687cb8189fafeb2e0d205ea4d8a660bd953bd7b1
|
[
"BSD-3-Clause"
] | 1
|
2021-09-11T17:25:25.000Z
|
2021-09-11T17:25:25.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 23 17:18:39 2021
@author: Koustav
"""
import os
import glob
import matplotlib.pyplot as plt
import seaborn as sea
import numpy as np
import pandas as pan
import math
import collections
import matplotlib.ticker as mtick
from mpl_toolkits import mplot3d
from matplotlib.collections import LineCollection
from scipy.optimize import curve_fit
import powerlaw
def pow_law(x, a, expo):
return a*(np.power(x, expo))
def trunc_pow_law(x, a, expo, trunc_expo): #Truncated Power Law
return a*(np.power(x, expo))*np.exp(trunc_expo*x)
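# Illustrative usage note (not part of the original script): these helpers are
# shaped for scipy.optimize.curve_fit, seeded with p0 as in the commented-out
# blocks further down. Kept commented here so the script's behaviour is unchanged:
#   x = np.linspace(1, 100, 200)
#   y = trunc_pow_law(x, 1.0, -0.75, -0.005)
#   popt, pcov = curve_fit(trunc_pow_law, x, y, p0=[1, -0.75, -0.0005], maxfev=5000)
#   perr = np.sqrt(np.diag(pcov))  # 1-sigma parameter errors from the covariance diagonal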
def main_ind():
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
gaol={} #Stores truncated power law fit data.
gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
L=0
for i in range(6,7):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
Hai
'''
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
'''if(p == 0.728):
print("Skipped")
continue'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / (a) for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
#Sorting array in increasing order of del(s).
#DP_freqs = DP_freqs[DP_freqs[:,0].argsort()]
#Next, to convert PDF into 1 - CDF (P(S >= (DEL(S))))
print("Sorted del(s) PDF:")
print(DP_freqs)
'''DP_freqs[-2,1] += DP_freqs[-1,1]; #DP_freqs[-1,1] = 0
k= len(DP_freqs[:,1]) #Finding total number of del(s) elements
print("Total distinct del(s) samples:\t" +str(k))
for j in range(k-3, -1, -1):
#Iterate over the PDF function in reverse.
DP_freqs[j,1] += DP_freqs[j+1,1]
print("Sorted del(s) 1-CDF:")
print(DP_freqs)'''
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
'''if(os.path.isdir("1-CDF")==False):
os.mkdir("1-CDF")
os.chdir("1-CDF")'''
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
#hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S \geq \Delta s)$"])
hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S = \Delta s)$"])
fig = plt.figure(figsize=(6.4,4.8))
f = sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S = \Delta s)$")
f.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
#Overlaying two seaborn plots.
#ax = fig.add_subplot(111)
#sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.2, ax= ax) #, s=1)
#ax.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
plt.yscale('log'); plt.xscale('log')
plt.xlim(1, 10**5)
plt.ylim(10**(-6.4), 10**(0.1))
plt.savefig("0P(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
'''x1 = np.transpose(DP_freqs[:,0])
x2 = np.transpose(DP_freqs[:,1])
popt, pcov = curve_fit(trunc_pow_law, x1, x2, p0= np.asarray([1, -0.75, -0.0005]), maxfev=5000 )
perr = np.sqrt(np.diag(pcov))
print("SD of exponent:\t" +str(perr[1]) + " for p:\t" +str(p))
tukan= (popt[0], popt[1], perr[1], popt[2], perr[2])
plt.plot(x1, trunc_pow_law(x1, *popt), 'm--', label=r'Fit: $ P (S \geq \Delta s) = %3.2f \times \Delta s^{(%4.3f \mp %4.3f)}\times e^{(%4.3f \mp %4.3f)\times \Delta s}$ ' % tukan )
plt.ylim(10**(-6.4), 10**(0.1)); plt.xlim(1, 10**5)
plt.legend()
plt.savefig("Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
#Saving best fit data.
gaol[float(round(CC,2))].append([L, p, -popt[1], perr[1], -popt[2], perr[2]])'''
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
#break;
#Saving as CSVs.
'''if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
K= [0.6, 0.7, 0.75, 0.8, 0.9, 0.95]
heado = 'L, p, alpha, SD(alpha), lambda, SD(lambda)'
for k in K:
np.savetxt("BestFitCDF_CC_%3.2F.csv" %(k), gaol[k], delimiter=',', header=heado, comments='#')
os.chdir(r"../../")'''
def main_ccdf_fit():
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
gaol={} #Stores truncated power law fit data.
gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
L=0; crosc= 0.7
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\512" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( p == 0.678):
print(str(CC) + " " + str(p) + " shall be skipped.")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
fit = powerlaw.Fit(data_temp[:,5],discrete=True,estimate_discrete = False) #If you already know xmin pass it as an argument (xmin=value) for speed
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
print('x_min: ',fit.xmin)
print('alpha: ',fit.truncated_power_law.parameter1)
print('1/lambda: ',1/fit.truncated_power_law.parameter2)
tukan = (-fit.truncated_power_law.parameter1, -fit.truncated_power_law.parameter2)
fig = fit.plot_ccdf(color ='cornflowerblue', ls='-', linewidth=1.1, alpha=0.2)
fit.plot_ccdf(color='darkcyan',marker='o', linestyle='', ms=1.2, alpha=0.35, ax=fig)
#ax = fig.add_subplot(111)
fit.truncated_power_law.plot_ccdf(color='darkslateblue', linestyle='--', label=r'Fit: $ P (S \geq \Delta s) \propto \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan, ax=fig)
fig.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
#x = fit.xmins
#y = fit.Ds
#plt.ylim(10**(-6.4), 10**(0.1));
plt.xlim(1, 10**5.3)
plt.xlabel(r"$|\Delta s|$")
plt.ylabel(r"$P (S \geq \Delta s)$")
plt.legend()
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("1-CDF")==False):
os.mkdir("1-CDF")
os.chdir("1-CDF")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
plt.savefig("Better Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
os.chdir("../../")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("Done with CDF Plots And Fits. Moving On To PDF Plots...")
fig = fit.plot_pdf(color='darkcyan',marker='o', linestyle='', ms=1.5, alpha=0.4)
#fit.plot_pdf(color='darkcyan',marker='o', linestyle='', ms=1.2, alpha=0.35, ax=fig)
#ax = fig.add_subplot(111)
fit.truncated_power_law.plot_pdf(color='darkslateblue', linestyle='--', label=r'Fit: $ P (S = \Delta s) \propto \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan, ax=fig)
fig.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
#x = fit.xmins
#y = fit.Ds
#plt.ylim(10**(-6.4), 10**(0.1));
plt.xlim(1, 10**5.3)
plt.xlabel(r"$|\Delta s|$")
plt.ylabel(r"$P (S = \Delta s)$")
plt.legend()
plt.savefig("Better Fit PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
comparison_tpl_exp = fit.distribution_compare('truncated_power_law','exponential',normalized_ratio=True)
comparison_tpl_streched_exp = fit.distribution_compare('truncated_power_law','stretched_exponential',normalized_ratio=True)
comparison_tpl_log_normal = fit.distribution_compare('truncated_power_law','lognormal',normalized_ratio=True)
comparison_tpl_pl = fit.distribution_compare('truncated_power_law','power_law',normalized_ratio=True)
f = open("Taupe.txt", "w+")
f.write("LR (Power Law): " + str(comparison_tpl_pl[0]) +" p-value: "+ str(comparison_tpl_pl[1]) +"\n")
f.write("LR (Exponential): " + str(comparison_tpl_exp[0]) +" p-value: "+ str(comparison_tpl_exp[1]) +"\n")
f.write("LR (Log-Normal): " + str(comparison_tpl_log_normal[0]) +" p-value: "+ str(comparison_tpl_log_normal[1]) +"\n")
f.write("LR (Stretched-Exponential): " + str(comparison_tpl_streched_exp[0]) +" p-value: "+ str(comparison_tpl_streched_exp[1]) +"\n")
f.close()
print("LR (Power Law): ",comparison_tpl_pl[0]," p-value: ",comparison_tpl_pl[1])
print("LR (Exponential): ",comparison_tpl_exp[0]," p-value: ",comparison_tpl_exp[1])
print("LR (Log-Normal): ",comparison_tpl_log_normal[0]," p-value: ",comparison_tpl_log_normal[1])
print("LR (Stretched-Exponential): ",comparison_tpl_streched_exp[0]," p-value: ",comparison_tpl_streched_exp[1])
gaol[float(round(CC,2))].append([L, p, fit.xmin, fit.truncated_power_law.parameter1, 1/fit.truncated_power_law.parameter2])
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
K= [0.6, 0.7, 0.75, 0.8, 0.9, 0.95]
heado = 'L, p, x_min, alpha, 1/lambda'
for k in K:
np.savetxt("Nu_Pow_0_6_BestFitCDF_CC_%3.2F.csv" %(k), gaol[k], delimiter=',', header=heado, comments='#')
os.chdir(r"../../")
def main_cumulative():
p_c = 0.725194
crosc = float(input("Enter a Cross-Correlation Value To Be Analysed (Choose Between 0.95, 0.9, 0.8, 0.75, 0.7 & 0.6):\t"))
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
binder=[]; L=0;
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\512" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
print('Gandu')
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01):
print(str(CC) + " shall be skipped.")
continue
if( p == 0.678):
print("Fuck You")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / a for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
a,b =DP_freqs.shape
#col_P= np.zeros((a,1)); col_P = p
DP_freqs = np.insert(DP_freqs, 0, p, axis=1)
'''DP_freqs looks like:
| p, del(s), P(del(s))|
'''
'''DP_freqs = list(DP_freqs.items()) #Converting dictionary to list.
for j in range(0,len(DP_freqs)):
DP_freqs[j].append(p)'''
print(DP_freqs)
if(len(binder)==0):
#First one in the bag.
binder = DP_freqs.tolist()
else:
binder.extend(DP_freqs.tolist())
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("3D")==False):
os.mkdir("3D")
os.chdir("3D")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
binder= np.array(binder)
fig=plt.figure()
ax = plt.axes(projection='3d')
#surf1 =ax.plot_trisurf(np.log10(binder[:,1]), binder[:,0], np.log10(binder[:,2]), cmap='viridis', edgecolor='none')
'''for k in range(0,len(self.x1)):
#Plotting SD bars
ax.plot([self.x1[k], self.x1[k]], [self.y1[k], self.y1[k]], [self.z1[k] + self.sd_z1[k], self.z1[k] - self.sd_z1[k]], marker="_", markerfacecolor='k', color='k')
'''
surf1 =ax.scatter(np.log10(binder[:,1]), binder[:,0], np.log10(binder[:,2]), c= np.log10(binder[:,2]), cmap='viridis', linewidth=0.5)
cbar1=fig.colorbar(surf1, shrink=0.75)
cbar1.ax.get_yaxis().labelpad = 12
cbar1.ax.set_ylabel(r"$P (S=\Delta s)$", rotation=270)
ax.set_xlabel(r"$log_{10}|\Delta s|$")
ax.set_zlabel(r"$log_{10}|P (S=\Delta s)|$")
ax.set_ylabel("Occupancy rate (p)")
#plt.zscale('log'); plt.xscale('log')
ax.view_init(elev=36.0, azim=-52.0)
ax.legend()
ax.set_title(r"$P (S=\Delta s)$ vs $|\Delta s|$, L = %d, $R_{0,0}$ = %3.2f" %(L,crosc))
plt.savefig("Cumulative Scatter P(del(s)) vs del(s) --- Grid Size (G)_%d - CC_%3.2f.png" %(L,crosc), dpi=550)
plt.show()
plt.close()
'''Now for scatter plot'''
fig=plt.figure(figsize=(6.4,4.8))
#ax = plt.axes(projection='3d')
ax = fig.add_subplot(111,projection='3d')
surf1 =ax.scatter(np.log10(binder[:,1]), binder[:,0], np.log10(binder[:,2]), c= np.log10(binder[:,2]), cmap='viridis', linewidth=0.5)
'''for k in range(0,len(self.x1)):
#Plotting SD bars
ax.plot([self.x1[k], self.x1[k]], [self.y1[k], self.y1[k]], [self.z1[k] + self.sd_z1[k], self.z1[k] - self.sd_z1[k]], marker="_", markerfacecolor='k', color='k')
'''
cbar1=fig.colorbar(surf1, shrink=0.75)
cbar1.ax.get_yaxis().labelpad = 12
cbar1.ax.set_ylabel(r"$log|P (S=\Delta s)|$", rotation=270)
ax.set_xlabel(r"$log_{10}|\Delta s|$")
ax.set_xlim(-0.1, 5)
ax.set_zlabel(r"$log_{10}|P (S=\Delta s)|$")
ax.set_zlim(-6.1, 0)
ax.set_ylabel("Occupancy rate (p)")
#plt.zscale('log'); plt.xscale('log')
#Plotting p_c plane.
x = np.linspace(-1,5.5,10)
z = np.linspace(-7,1,10)
X,Z = np.meshgrid(x,z)
Y= 0*X +0*Z + p_c
#ax.hold(True) #Preserve pre-plotted elements.
ax.plot_surface(X,Y,Z, alpha= 0.3, color='k', antialiased=True)
ax.text(5, p_c, -1, "$p_{c}(q)$", color='0.5')
'''p_clin = np.array([[0,p_c], [5,p_c]])
lines = LineCollection([p_clin],zorder=1000,color='0.65',lw=2)
ax.add_collection3d(lines, zs=-90)'''
ax.view_init(elev=36.0, azim=-52.0)
ax.legend()
ax.set_title(r"$log|P (S=\Delta s)|$ vs $log|\Delta s|$, L = %d, $R_{0,0}$ = %3.2f" %(L,crosc))
plt.savefig("Cumulative Scatter Plane P(del(s)) vs del(s) --- Grid Size (G)_%d - CC_%3.2f.png" %(L,crosc), dpi=550)
ax.view_init(elev=62.0, azim=-3.0)
plt.savefig("Cumulative Scatter Plane P(del(s)) vs del(s) Top Down --- Grid Size (G)_%d - CC_%3.2f.png" %(L,crosc), dpi=550)
plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
def main_del_s_count():
p_c = 0.725194
crosc = float(input("Enter a Cross-Correlation Value To Be Analysed (Choose Between 0.95, 0.9, 0.8, 0.75, 0.7 & 0.6):\t"))
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
binder=[]; L=0;
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
print('Gandu')
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01):
print(str(CC) + " shall be skipped.")
continue
if( p == 0.678):
print("Fuck You")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / a for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
a,b =DP_freqs.shape
#col_P= np.zeros((a,1)); col_P = p
DP_freqs = np.insert(DP_freqs, 0, p, axis=1)
'''DP_freqs looks like:
| p, del(s), P(del(s))|
'''
'''DP_freqs = list(DP_freqs.items()) #Converting dictionary to list.
for j in range(0,len(DP_freqs)):
DP_freqs[j].append(p)'''
print(DP_freqs)
print("Number of del s counts: " + str(a))
binder.append([p, a])
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Bifurcation")==False):
os.mkdir("Bifurcation")
os.chdir("Bifurcation")
if(os.path.isdir("S Count")==False):
os.mkdir("S Count")
os.chdir("S Count")
binder= np.array(binder)
hurtlocker= pan.DataFrame(binder, columns= ["p", r"Number of unique $|\Delta s|$ observations"])
f = sea.scatterplot(data=hurtlocker, x="p" , y=r"Number of unique $|\Delta s|$ observations")#, marker="+")
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.2, ax= ax) #, s=1)
f.set_title(r'Unique $|\Delta s|$ observations, Grid Size (G) = %d, Cross-Correlation = %3.2f' %( L, crosc))
#plt.yscale('log'); #plt.xscale('log')
#plt.ylim(1, 10**5)
plt.axvline(x= p_c, color='0.65')
plt.text(p_c+ 0.003,10**2,r'$p_{c}$',rotation=90, color ='0.65')
plt.savefig("S Count, Grid Size (G) = %d, CC = %3.2f.png" %(L, crosc), dpi=400)
plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
def main_del_s_symmetry():
p_mask=[0.658, 0.666, 0.678, 0.689, 0.701, 0.728, 0.739, 0.743, 0.755, 0.773 ]
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
MastBind=[]; L=0
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( p not in p_mask):
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
#data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / (a) for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
#Sorting array in increasing order of del(s).
DP_freqs = DP_freqs[DP_freqs[:,0].argsort()]
#Next, to convert PDF into 1 - CDF (P(S >= (DEL(S))))
print("Sorted del(s) PDF:")
print(DP_freqs)
#DP_freqs[-2,1] += DP_freqs[-1,1]; #DP_freqs[-1,1] = 0
k= len(DP_freqs[:,1]) #Finding total number of del(s) elements
print("Total distinct del(s) samples:\t" +str(k))
'''Performing a log-mod transform
https://blogs.sas.com/content/iml/2014/07/14/log-transformation-of-pos-neg.html
https://juluribk.com/dealing-with-plotting-negative-zero-and-positive-values-in-log-scale.html
'''
DP_freqs[:,0] = np.sign(DP_freqs[:,0])*(np.log10(np.abs(DP_freqs[:,0])+1))
DP_freqs = np.insert(DP_freqs, 2, float(round(CC,2)), axis=1)
DP_freqs = np.insert(DP_freqs, 3, p, axis=1)
'''DP_freqs looks like:
|del(s), P(del(s)), CC, p|
'''
print("Final del(s) PDF:")
print(DP_freqs)
if(len(MastBind)== 0):
#Empty
MastBind = DP_freqs
else:
MastBind = np.concatenate((MastBind, DP_freqs), axis=0)
'''for j in range(k-3, -1, -1):
#Iterate over the PDF function in reverse.
DP_freqs[j,1] += DP_freqs[j+1,1]
print("Sorted del(s) 1-CDF:")
print(DP_freqs)'''
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("Symmetry")==False):
os.mkdir("Symmetry")
os.chdir("Symmetry")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$\Delta s$", r"$P (S = \Delta s)$", "Cross-Correlation", "p"])
fig = plt.figure(figsize=(6.4,4.8))
#Overlaying two seaborn plots.
#ax = fig.add_subplot(111)
f= sea.scatterplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$")#, alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$", alpha=0.2, ax= ax) #, s=1)
f.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
plt.yscale('log'); #plt.xscale('log')
#plt.xlim(1, 10**5)
plt.ylim(10**(-6.4), 10**(0.1))
#plt.xlim(-5, 5)
'''x1 = np.transpose(DP_freqs[:,0])
x2 = np.transpose(DP_freqs[:,1])
popt, pcov = curve_fit(trunc_pow_law, x1, x2, p0= np.asarray([1, -0.75, -0.0005]), maxfev=5000 )
perr = np.sqrt(np.diag(pcov))
print("SD of exponent:\t" +str(perr[1]) + " for p:\t" +str(p))
tukan= (popt[0], popt[1], perr[1], popt[2], perr[2])
plt.plot(x1, trunc_pow_law(x1, *popt), 'm--', label=r'Fit: $ P (S \geq \Delta s) = %3.2f \times \Delta s^{(%4.3f \mp %4.3f)}\times e^{(%4.3f \mp %4.3f)\times \Delta s}$ ' % tukan )
plt.legend()'''
plt.savefig("Symmetry PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\..\analysis\Mass Action\DP")
#break;
#Plotting cumulative results.
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("Symmetry")==False):
os.mkdir("Symmetry")
os.chdir("Symmetry")
if(os.path.isdir("Cum")==False):
os.mkdir("Cum")
os.chdir("Cum")
hurtlocker= pan.DataFrame(MastBind, columns= [r"$\Delta s$", r"$P (S = \Delta s)$", "Cross-Correlation", "p"])
fig = plt.figure(figsize=(6.4,4.8))
#Overlaying two seaborn plots.
#ax = fig.add_subplot(111)
f= sea.scatterplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$", hue="Cross-Correlation")#, alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$", alpha=0.2, ax= ax) #, s=1)
f.set_title('p = %f, Grid Size (G) = %d' %(MastBind[0,3], L))
plt.yscale('log'); #plt.xscale('log')
#plt.xlim(1, 10**5)
plt.ylim(10**(-6.4), 10**(0.1))
plt.xlim(-5, 5)
plt.savefig("Alt Cum Symmetry PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d.png" %(MastBind[0,3], L), dpi=400)
#plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\..\analysis\Mass Action\DP")
def main_bifurcation():
p_c = 0.725194
crosc = float(input("Enter a Cross-Correlation Value To Be Analysed (Choose Between 0.95, 0.9, 0.8, 0.75, 0.7 & 0.6):\t"))
#crosc =0.8
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
binder=[]; L=0;
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
print("Skipping dump file.")
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01):
print(str(CC) + " shall be skipped.")
continue
if( p == 0.678):
print("Fuck You")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / a for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
a,b =DP_freqs.shape
split_data = DP_freqs[:,1] < 10**(-5.6)
DP_freqs = DP_freqs[split_data]
print("Half Done:")
print(DP_freqs)
split_data = DP_freqs[:,1] > 10**(-6)
DP_freqs_band = DP_freqs[split_data] #Stores the band of del(s) values whose probabilities lie between 10^(-6) and 10^(-5.6)
#col_P= np.zeros((a,1)); col_P = p
DP_freqs_band = np.insert(DP_freqs_band, 0, p, axis=1)
DP_freqs_band = DP_freqs_band[DP_freqs_band[:,1].argsort()]
#Sorting in increasing values of del(s)
print("Total number of points in given gap for p:\t"+str(p) +" is: \t" +str(len(DP_freqs_band[:,2])) +"\n")
print(DP_freqs_band)
'''DP_freqs_band looks like:
| p, del(s), P(del(s))|
'''
flag=0
for j in range(1, len(DP_freqs_band[:,2])-1):
if(abs(DP_freqs_band[j,1] -DP_freqs_band[j-1,1]) > 411 or abs(DP_freqs_band[j,1] -DP_freqs_band[j+1,1]) > 411):
# 10^(3.3) - 10^(3.2) = 410.369
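#i.e. a del(s) value whose spacing from either sorted neighbour exceeds one
#0.1-decade step at this scale (~411) is recorded as a candidate branch point.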
binder.append([p,DP_freqs_band[j,1]])
flag=1
if(flag==0):
#No del(s) value satisfied the bandwidth demand.
#if()
binder.append([p,DP_freqs_band[-1,1]])
#Append the very last value
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Bifurcation")==False):
os.mkdir("Bifurcation")
os.chdir("Bifurcation")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
binder= np.array(binder)
hurtlocker= pan.DataFrame(binder, columns= ["p", r"$|\Delta s|$ s.t. $P (\Delta s \geq 10^{-6})$"])
f = sea.scatterplot(data=hurtlocker, x="p" , y=r"$|\Delta s|$ s.t. $P (\Delta s \geq 10^{-6})$", marker="+")
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.2, ax= ax) #, s=1)
f.set_title('Bifurcation Map, Grid Size (G) = %d, Cross-Correlation = %3.2f' %( L, crosc))
plt.yscale('log'); #plt.xscale('log')
plt.ylim(1, 10**5)
plt.axvline(x= p_c, color='0.65')
plt.text(p_c+ 0.003,10**1,r'$p_{c}$',rotation=90, color ='0.65')
plt.savefig("Bifurcation Map, Grid Size (G) = %d, CC = %3.2f.png" %(L, crosc), dpi=400)
plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
def plot_fit_pdf():
twist =(-1.2912647288993737, -(1/37.72480211483688))
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
gaol={} #Stores truncated power law fit data.
gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
L=0; crosc= 0.7
for i in range(0,1):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01 or p != 0.66):
print(str(CC) + " " + str(p) + " shall be skipped.")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
'''
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / (a) for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
#Sorting array in increasing order of del(s).
DP_freqs = DP_freqs[DP_freqs[:,0].argsort()]
print("Sorted del(s) PDF:")
print(DP_freqs)
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("1-CDF")==False):
os.mkdir("1-CDF")
os.chdir("1-CDF")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S = \Delta s)$"])
fig = plt.figure(figsize=(6.4,4.8))
#Overlaying two seaborn plots.
ax = fig.add_subplot(111)
sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S = \Delta s)$", ax= ax)#, alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S = \Delta s)$", alpha=0.2, ax= ax) #, s=1)
ax.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
plt.yscale('log'); plt.xscale('log')
plt.xlim(1, 10**5)
plt.ylim(10**(-6.3), 10**(0.1))
x1 = np.transpose(DP_freqs[:,0])
x2 = np.transpose(DP_freqs[:,1])
#popt, pcov = curve_fit(trunc_pow_law, x1, x2, p0= np.asarray([1, -0.75, -0.0005]), maxfev=5000 )
#perr = np.sqrt(np.diag(pcov))
#print("SD of exponent:\t" +str(perr[1]) + " for p:\t" +str(p))
#tukan= (popt[0], popt[1], perr[1], popt[2], perr[2])
tukan = (1.0,) + twist #Assumed unit amplitude: twist only stores the exponent and the exponential cutoff rate.
plt.plot(x1, trunc_pow_law(x1, *tukan), color='darkslateblue', linestyle='--', label=r'Fit: $ P (S = \Delta s) = %3.2f \times \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan )
plt.ylim(10**(-6.4), 10**(0.1)); plt.xlim(1, 10**5)
plt.legend()
plt.savefig("Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
#Next, to convert PDF into 1 - CDF (P(S >= (DEL(S))))
DP_freqs[-2,1] += DP_freqs[-1,1]; #DP_freqs[-1,1] = 0
k= len(DP_freqs[:,1]) #Finding total number of del(s) elements
print("Total distinct del(s) samples:\t" +str(k))
for j in range(k-3, -1, -1):
#Iterate over the PDF function in reverse.
DP_freqs[j,1] += DP_freqs[j+1,1]
print("Sorted del(s) 1-CDF:")
print(DP_freqs)
plt.savefig("Even Better Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
comparison_tpl_exp = fit.distribution_compare('truncated_power_law','exponential',normalized_ratio=True)
comparison_tpl_streched_exp = fit.distribution_compare('truncated_power_law','stretched_exponential',normalized_ratio=True)
comparison_tpl_log_normal = fit.distribution_compare('truncated_power_law','lognormal',normalized_ratio=True)
comparison_tpl_pl = fit.distribution_compare('truncated_power_law','power_law',normalized_ratio=True)
print("LR (Power Law): ",comparison_tpl_pl[0]," p-value: ",comparison_tpl_pl[1])
print("LR (Exponential): ",comparison_tpl_exp[0]," p-value: ",comparison_tpl_exp[1])
print("LR (Log-Normal): ",comparison_tpl_log_normal[0]," p-value: ",comparison_tpl_log_normal[1])
print("LR (Stretched-Exponential): ",comparison_tpl_streched_exp[0]," p-value: ",comparison_tpl_streched_exp[1])
gaol[float(round(CC,2))].append([L, p, fit.xmin, fit.truncated_power_law.parameter1, 1/fit.truncated_power_law.parameter2])
os.chdir(r"..\..\..\..\..\..\analysis\Mass Action\DP")
'''
def cross_cor(grim_fandango, lag, L, p):
CC=0; k= 128/L
for t in range(0, len(grim_fandango[:,0])):
if grim_fandango[t,0] == p:
CC = grim_fandango[t,1]+ grim_fandango[t,3]*(math.exp(lag*grim_fandango[t,5]*k*k)); break;
#Calculating cross-correlation b/w frames.
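#i.e. CC(lag) = A_p + B_p*exp(lag*lambda_p*(128/L)^2), with the per-p constants
#A_p, B_p and lambda_p read from columns 1, 3 and 5 of the decay table.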
print("CC:\t"+ str(CC))
return CC;
main_ind()
hexsha 2ea1bf2e9cb8105280a4f2635279518d125a4312 | 8,005 bytes | py | Python | python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py | Li-fAngyU/Paddle @ e548f65f96697830035a28f9070b40829408ccdb | ["Apache-2.0"]
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
def get_outputs(DOut, X, Y):
DX = np.dot(DOut, Y.T)
DY = np.dot(X.T, DOut)
DBias = np.sum(DOut, axis=0)
return DX, DY, DBias
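# Shape check (illustrative): with DOut of shape (8, 128), X (8, 4) and Y (4, 128),
# DX = DOut.dot(Y.T) is (8, 4), DY = X.T.dot(DOut) is (4, 128) and DBias is (128,),
# matching X, Y and the bias of the fused GEMM epilogue.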
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYBiasFP16(OpTest):
def setUp(self):
self.op_type = "fused_gemm_epilogue_grad"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
}
self.attrs = {"activation": 'none'}
DX, DY, DBias = get_outputs(self.inputs['DOut'], self.inputs['X'],
self.inputs['Y'])
self.outputs = {'DX': DX, 'DY': DY, 'DBias': DBias}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYBiasFP32(
TestFuseGemmEpilogueGradOpDXYBiasFP16):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYBiasFP64(
TestFuseGemmEpilogueGradOpDXYBiasFP16):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYBiasFP16(OpTest):
def setUp(self):
self.op_type = "fused_gemm_epilogue_grad"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
}
self.attrs = {"activation": 'none'}
_, DY, DBias = get_outputs(self.inputs['DOut'], self.inputs['X'],
self.inputs['Y'])
self.outputs = {'DY': DY, 'DBias': DBias}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYBiasFP32(
TestFuseGemmEpilogueGradOpDYBiasFP16):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYBiasFP64(
TestFuseGemmEpilogueGradOpDYBiasFP16):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYFP16(OpTest):
def setUp(self):
self.op_type = "fused_gemm_epilogue_grad"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
}
self.attrs = {"activation": 'none'}
_, DY, _ = get_outputs(self.inputs['DOut'], self.inputs['X'],
self.inputs['Y'])
self.outputs = {'DY': DY}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYFP32(TestFuseGemmEpilogueGradOpDYFP16):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYFP64(TestFuseGemmEpilogueGradOpDYFP16):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYFP16(OpTest):
def setUp(self):
self.op_type = "fused_gemm_epilogue_grad"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
}
self.attrs = {"activation": 'none'}
DX, DY, _ = get_outputs(self.inputs['DOut'], self.inputs['X'],
self.inputs['Y'])
self.outputs = {'DX': DX, 'DY': DY}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYFP32(TestFuseGemmEpilogueGradOpDXYFP16):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYFP64(TestFuseGemmEpilogueGradOpDXYFP16):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
if __name__ == "__main__":
np.random.seed(0)
unittest.main()
hexsha 2ed1c87b80dd4e8779929d2ec1d831bf4040a93d | 45 bytes | py | Python | garbage/buidlInformation.py | mjasnikovs/horus @ c342aafc074e163a5a2eaa3564cba3131c6050a0 | ["MIT"]
import cv2
print(cv2.getBuildInformation())
hexsha 257570ef08bf6f96adf3ca076eab3e37b42bac17 | 6,083 bytes | py | Python | results/migrations/0001_initial.py | lilbex/bitcom @ c0d09155b655de3ebe84851f24e5c07ef60da611 | ["MIT"]
# Generated by Django 3.2.6 on 2021-08-24 18:31
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='agentname',
fields=[
('name_id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('firstname', models.CharField(max_length=200)),
('lastname', models.CharField(max_length=200)),
('email', models.CharField(max_length=200)),
('phone', models.CharField(max_length=200)),
('pollingunit_uniqueid', models.IntegerField()),
],
),
migrations.CreateModel(
name='announced_lga_results',
fields=[
('result_id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('lga_name', models.CharField(max_length=200)),
('party_abbreviation', models.CharField(max_length=50)),
('party_score', models.IntegerField()),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='announced_pu_results',
fields=[
('result_id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('polling_unit_uniqueid', models.CharField(max_length=200)),
('party_abbreviation', models.CharField(max_length=50)),
('party_score', models.IntegerField()),
('entered_by_user', models.CharField(max_length=7)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='announced_state_results',
fields=[
('result_id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('state_name', models.CharField(max_length=200)),
('party_abbreviation', models.CharField(max_length=50)),
('party_score', models.IntegerField()),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='announced_ward_results',
fields=[
('result_id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('ward_name', models.CharField(max_length=200)),
('party_abbreviation', models.CharField(max_length=50)),
('party_score', models.IntegerField()),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='lga',
fields=[
('uniqueid', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('lga_id', models.IntegerField()),
('lga_name', models.CharField(max_length=200)),
('state_id', models.IntegerField()),
('lga_description', models.TextField()),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField(max_length=200)),
('user_ip_address', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='party',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('partyid', models.CharField(max_length=200)),
('partyname', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='polling_unit',
fields=[
('uniqueid', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('polling_unit_id', models.IntegerField()),
('ward_id', models.IntegerField()),
('lga_id', models.IntegerField()),
('uniquewardid', models.IntegerField()),
('polling_unit_number', models.CharField(max_length=200)),
('polling_unit_name', models.CharField(max_length=200)),
('polling_unit_description', models.TextField()),
('lat', models.CharField(max_length=50)),
('long', models.CharField(max_length=200)),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='states',
fields=[
('state_id', models.IntegerField(editable=False, primary_key=True, serialize=False, unique=True)),
('state_name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='ward',
fields=[
('uniqueid', models.IntegerField(editable=False, primary_key=True, serialize=False, unique=True)),
('ward_id', models.IntegerField()),
('ward_name', models.CharField(max_length=50)),
('lga_id', models.IntegerField()),
('ward_description', models.TextField()),
('entered_by_user', models.CharField(max_length=200)),
('date_entered', models.DateTimeField()),
('user_ip_address', models.CharField(max_length=50)),
],
),
]
hexsha 25dd5361b6b0dc7b073414ddb1a152c255756063 | 10,975 bytes | py | Python | convert/test_convert.py | mikewatkins-new/jboss_call_api @ 690179b60c0b9574d0951a1cb57ffdb6eaca8943 | ["MIT"]
import unittest
from convert import jboss_command_to_http_request
class TestJBOSSCommandToHTTPGETRequestOperationOnlyTestCase(unittest.TestCase):
"""Test case for JBOSS CLI commands operation only commands using HTTP GET"""
def test_no_path_one_operations_no_params_http_get(self):
"""See if we only operations without params return correctly using HTTP GET"""
test_data = ':read-resource'
desired_operation = {"operation": "resource"}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
def test_no_path_only_operations_empty_params_http_get(self):
"""See if only operations with empty params return correctly using HTTP GET"""
test_data = ':read-resource()'
desired_operation = {"operation": "resource"}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
def test_no_path_only_operations_single_param_http_get(self):
""" See if only operations with single parameter return correctly using HTTP GET"""
test_data = ':read-resource(attributes-only=true)'
desired_operation = {"operation": "resource", "attributes-only": "true"}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
def test_no_path_only_operations_multiple_params_http_get(self):
"""See if only operations with multiple params return correctly using HTTP GET"""
test_data = ':read-attribute(include-defaults=true,name=uuid)'
desired_operation = {"operation": "attribute", "include-defaults": "true", "name": "uuid"}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
class TestJBOSSCommandToHTTPPOSTRequestOperationOnlyTestCase(unittest.TestCase):
"""Test case for JBOSS CLI commands operation only commands using HTTP POST"""
def test_no_path_one_operations_no_params_http_post(self):
"""See if we only operations without params return correctly using HTTP POST"""
test_data = ':read-resource'
desired_operation = {"operation": "read-resource"}
result = jboss_command_to_http_request(test_data, "POST")
self.assertEqual(result, desired_operation)
def test_no_path_only_operations_empty_params_http_post(self):
"""See if only operations with empty params return correctly using HTTP POST"""
test_data = ':read-resource()'
desired_operation = {"operation": "read-resource"}
result = jboss_command_to_http_request(test_data, "POST")
self.assertEqual(result, desired_operation)
def test_no_path_only_operations_single_param_http_post(self):
"""See if only operations with single parameter return correctly using HTTP POST"""
test_data = ':read-attribute(name=server-state)'
desired_operation = {"operation": "read-attribute", "name": "server-state"}
result = jboss_command_to_http_request(test_data, "POST")
self.assertEqual(result, desired_operation)
def test_no_path_only_operations_multiple_params_http_post(self):
"""See if only operations with multiple params return correctly using HTTP POST"""
test_data = ':read-operation-description(name=whoami,access-control=true)'
desired_operation = {"operation": "read-operation-description", "name": "whoami", "access-control": "true"}
result = jboss_command_to_http_request(test_data, "POST")
self.assertEqual(result, desired_operation)
class TestJBOSSCommandToHTTPGETRequestTestCase(unittest.TestCase):
"""Test case for convert.jboss_command_to_http_request"""
def test_single_path_and_operation_no_params_http_get(self):
"""See if command with path and operation returns correctly using HTTP GET"""
test_data = '/subsystem=undertow:read-resource'
desired_operation = {"operation": "resource", "address": "/subsystem/undertow"}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
def test_single_path_and_operation_single_param_http_get(self):
"""See if command with path, operation, and single param return correctly using HTTP GET"""
test_data = '/subsystem=undertow:read-attribute(resolve-expressions=true)'
desired_operation = {
"operation": "attribute", "resolve-expressions": "true", "address": "/subsystem/undertow"
}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
def test_single_path_and_operation_multiple_params_http_get(self):
"""See if command with path, operation, and multiple params return correctlty using HTTP GET"""
test_data = '/subsystem=undertow:read-attribute(resolve-expressions=true,name=instance-id)'
desired_operation = {
"operation": "attribute", "resolve-expressions": "true", "name": "instance-id",
"address": "/subsystem/undertow"
}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
def test_multiple_path_and_operation_no_params_http_get(self):
"""See if command with path, operation, and single param return correctly using HTTP GET"""
test_data = '/subsystem=undertow/server=default-server:read-resource'
desired_operation = {"operation": "resource", "address": "/subsystem/undertow/server/default-server"}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
def test_multiple_path_and_operation_empty_params_http_get(self):
"""See if command with path, operation, and single param return correctly using HTTP GET"""
test_data = '/subsystem=undertow/server=default-server:read-resource()'
desired_operation = {"operation": "resource", "address": "/subsystem/undertow/server/default-server"}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
def test_multiple_path_and_operation_single_param_http_get(self):
"""See if command with path, operation, and single param return correctly using HTTP GET"""
test_data = '/subsystem=undertow/server=default-server:read-attribute(name=default-host)'
desired_operation = {
"operation": "attribute", "name": "default-host",
"address": "/subsystem/undertow/server/default-server"
}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
def test_multiple_path_and_operation_multiple_param_http_get(self):
"""See if command with multiple pathresult, operation, and multiple param return correctly using HTTP GET"""
test_data = '/subsystem=undertow/server=default-server:read-attribute(resolve-expressions=true,include-defaults=true,name=servlet-container)'
desired_operation = {
"operation": "attribute", "resolve-expressions": "true", "include-defaults": "true",
"name": "servlet-container", "address": "/subsystem/undertow/server/default-server"
}
result = jboss_command_to_http_request(test_data, "GET")
self.assertEqual(result, desired_operation)
class TestJBOSSCommandToHTTPPOSTRequestTestCase(unittest.TestCase):
"""Test case for for convert.jboss_command_to_http_request"""
def test_single_path_and_operation_no_params_http_post(self):
"""See if command with path and operation returns correctly using HTTP POST"""
test_data = '/core-service=management:whoami'
desired_operation = {"operation": "whoami", "address": ["core-service", "management"]}
result = jboss_command_to_http_request(test_data, "POST")
self.assertEqual(result, desired_operation)
def test_single_path_and_operation_single_param_http_post(self):
"""See if command with path, operation, and single param return correctly using HTTP POST"""
test_data = '/core-service=server-environment:path-info(unit=GIGABYTES)'
desired_operation = {
"operation": "path-info", "unit": "GIGABYTES",
"address": ["core-service", "server-environment"]
}
result = jboss_command_to_http_request(test_data, "POST")
self.assertEqual(result, desired_operation)
def test_single_path_and_operation_multiple_params_http_post(self):
"""See if command with path, operation, and multiple params return correctly using HTTP POST"""
test_data = '/subsystem=undertow:write-attribute(name=statistics-enabled,value=true)'
desired_operation = {
"operation": "write-attribute", "name": "statistics-enabled", "value": "true",
"address": ["subsystem", "undertow"]
}
result = jboss_command_to_http_request(test_data, "POST")
self.assertEqual(result, desired_operation)
def test_multiple_path_and_operation_no_params_http_post(self):
"""See if command with multiple pathresult, operation, and single param return correctly using HTTP POST"""
test_data = "/subsystem=datasources/data-source=ExampleDS:dump-queued-threads-in-pool()"
desired_operation = {
"operation": "dump-queued-threads-in-pool",
"address": ["subsystem", "datasources", "data-source", "ExampleDS"]
}
result = jboss_command_to_http_request(test_data, "POST")
self.assertEqual(result, desired_operation)
def test_multiple_path_and_operation_single_param_http_post(self):
"""See if command with multiple pathresult, operation, and single param return correctly using HTTP POST"""
test_data = "/core-service=management/service=configuration-changes:add(max-history=200)"
desired_operation = {
"operation": "add", "max-history": "200",
"address": ["core-service", "management", "service", "configuration-changes"]
}
result = jboss_command_to_http_request(test_data, "POST")
self.assertEqual(result, desired_operation)
def test_multiple_path_and_operation_multiple_param_http_post(self):
"""See if command with multiple pathresult, operation, and multiple params return correctly using HTTP POST"""
test_data = "/subsystem=datasources/data-source=ExampleDS:write-attribute(name=max-pool-size,value=5000)"
desired_operation = {
"operation": "write-attribute", "name": "max-pool-size", "value": "5000",
"address": ["subsystem", "datasources", "data-source", "ExampleDS"]
}
result = jboss_command_to_http_request(test_data, "POST")
self.assertEqual(result, desired_operation)
if __name__ == '__main__':
unittest.main()
hexsha 25e0e386f4839503cf27575ea15bd5ecf033d49a | 113 bytes | py | Python | src/__init__.py | logic-and-learning/AdvisoRL @ 3bbd741e681e6ea72562fec142d54e9d781d097d | ["MIT"]
from . import baselines
from . import common
from . import reward_machines
from . import rl
from . import worlds
hexsha d3903637fa8b57e3aab7d4a1d3f9885bab2aabda | 34 bytes | py | Python | build/lib/abp/adaptives/dqn/__init__.py | LinearZoetrope/abp @ 2459c1b4d77606c1d70715ce8378d738ba102f37 | ["MIT"]
from .adaptive import DQNAdaptive
hexsha 6c98706e61936bfb19561cc8e9e0f4a5b6b8ad20 | 38 bytes | py | Python | tensorflow-extensions/dataset/__init__.py | king-michael/tensorflow-extensions @ c563d022e95d063f221a1b030db112039b9c407e | ["MIT"]
from .NumpyDataset import NumpyDataset
hexsha 6cb2edb7e1e29ba70850bedeb3eee19d43933ca6 | 72 bytes | py | Python | utils/__init__.py | DNL-inc/bit @ b6f35e95b2b40a3eec308a2c7179a73eadad3556 | ["MIT"]
from . import db_api
from . import misc
# from . import postpone_message
hexsha 9f671a95487f003d950ed13b08b34375df5b9270 | 21 bytes | py | Python | src/kdenlive_tools/__main__.py | kdeldycke/kdenlive-tools @ 442fd45f6df473e15d20a67fe7feaf3b9f93acda | ["BSD-2-Clause"]
import cli
cli.cli()
hexsha 9f80aa8cd5c991f5a585c0a271b19cf8e97f19c9 | 141 bytes | py | Python | lit/fields/text.py | velvetkeyboard/py-lit @ 2bdc722e251d2c53ed19ad0e82e2447d9cdda8f9 | ["Unlicense"]
from lit.fields.base import Field
from lit.fields.base import TextType
class TextField(Field):
sql_type = TextType()
py_type = str
hexsha 9f827ccaec697c2953b95793dc0577ac42b9d164 | 306 bytes | py | Python | cmake_tidy/utils/app_configuration/__init__.py | MaciejPatro/cmake-tidy @ ddab3d9c6dd1a6c9cfa47bff5a9f120defea9e6a | ["MIT"]
###############################################################################
# Copyright Maciej Patro (maciej.patro@gmail.com)
# MIT License
###############################################################################
from cmake_tidy.utils.app_configuration.configuration import ConfigurationError
hexsha 9fa2b6471bd6d79dfad1b744ce99d19125228b13 | 32 bytes | py | Python | stubbs/defs/ustr.py | holy-crust/reclaimer @ 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | ["MIT"]
from ...hek.defs.ustr import *
hexsha 9fc6625edca5f3680489dcc397b225e54927655e | 29 bytes | py | Python | _filament/__init__.py | comstud/filament @ be6dbd6bf76dbcb0655c7fae239333d64ee8bb5f | ["MIT"]
from _filament.core import *
hexsha 4ca2201274eaddfe1362c3f7ce25b8cbc37de3da | 27 bytes | py | Python | db_quick_setup/django/db/backends/sqlite3.py | amezin/django-db-quick-setup @ e0c90c8b112b2230b19885e39a92b67b5a7d3819 | ["BSD-2-Clause"]
from .dummy import Backend
hexsha 4cab4a8359dd4ce2c56dafb5af2f65badffe704e | 45 bytes | py | Python | vnpy_oracle/__init__.py | noranhe/vnpy_oracle @ 73c2ce070f36703e78af752ce8483f8cd87cf9fa | ["MIT"]
from .oracle_database import database_manager
hexsha 4cbb33c2f4e123b773b6ed31e96a7f22c0768349 | 1,310 bytes | py | Python | test/test_tpo_data_dt_os_erx_patient_prescriptions_patient_prescription_dto.py | my-workforce/TMB-SDK @ bea9e8dd82240c30f7809b052a4a612202d4e607 | ["CECILL-B"]
# coding: utf-8
"""
Transaction Management Bus (TMB) API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: V3.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.tpo_data_dt_os_erx_patient_prescriptions_patient_prescription_dto import TpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO # noqa: E501
from swagger_client.rest import ApiException
class TestTpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO(unittest.TestCase):
"""TpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO(self):
"""Test TpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.tpo_data_dt_os_erx_patient_prescriptions_patient_prescription_dto.TpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
hexsha e23a891f09a542df416f2a9cde89a79b43479dfe | 27,200 bytes | py | Python | dD_plots_lon_runbin.py | HannahSus/MercuryPolarCratersDepthDiameter @ e96fc6cadfa5ebd0558ebea737c517d51fcb0d8a | ["CC0-1.0"]
#! /Users/susorhc1/anaconda/bin/python
##
##
##
# Program: dD_plots_lon_runbin
# Author: Hannah C.M. Susorney
# Date Created: 2020-03-03
#
# Purpose: To compare depth/diameter measurements in overlapping longitude bins
# Used in study
#
# Required Inputs: .csv of data
#
# Updates: 2021-08-31 - Clean and document code
#
#
##
##
##
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker as mticker
from matplotlib.ticker import FixedLocator
################################################################################
export_location = '../analysis/'
longitude_bin_size = 15
max_diam = 10.0
min_diam = 5.0
################################################################################
file_mla = '../crater_measurements/polar_hannah.csv'
data_source_mla = '_mla'
dd_data_mla = np.loadtxt(file_mla,dtype='str',delimiter=',',skiprows=1)
index_diam = np.where((dd_data_mla[:,5].astype(np.float) < max_diam) & (dd_data_mla[:,5].astype(np.float) > min_diam))
dd_data_mla = dd_data_mla[index_diam,:]
dd_data_mla = dd_data_mla[0,:,:]
depth_mla = dd_data_mla[:,7].astype(np.float)
diameter_mla = dd_data_mla[:,5].astype(np.float)
longitude_mla = dd_data_mla[:,2].astype(np.float)
latitude_mla = dd_data_mla[:,1].astype(np.float)
radar_bright_mla = dd_data_mla[:,3]
index_radar_bright_mla = np.where(radar_bright_mla=='Yes')
for k in range(0,len(longitude_mla)):
if longitude_mla[k] > 180:
longitude_mla[k]=longitude_mla[k]-360
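#The loop above wraps longitudes from [0, 360) into (-180, 180], e.g. 350 -> -10,
#so the running longitude bins below can be centred on 0.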
################################################################################
file_nancy = '../crater_measurements/depth_diameter_spreadsheet_nancy.csv'
data_source_nancy = '_nancy'
dd_data_nancy = np.loadtxt(file_nancy,dtype='str',delimiter=',',skiprows=1)
index_diam = np.where((dd_data_nancy[:,22].astype(np.float) < max_diam) & (dd_data_nancy[:,22].astype(np.float) > min_diam))
dd_data_nancy = dd_data_nancy[index_diam,:]
dd_data_nancy = dd_data_nancy[0,:,:]
depth_nancy = dd_data_nancy[:,23].astype(np.float)
depth_error_nancy = dd_data_nancy[:,8].astype(np.float)
diameter_nancy = dd_data_nancy[:,22].astype(np.float)
diameter_error_nancy = dd_data_nancy[:,6].astype(np.float)
longitude_nancy = dd_data_nancy[:,36].astype(np.float)
latitude_nancy = dd_data_nancy[:,35].astype(np.float)
for k in range(0,len(longitude_nancy)):
if longitude_nancy[k] > 180:
longitude_nancy[k]=longitude_nancy[k]-360
radar_bright_nancy = dd_data_nancy[:,1]
index_radar_bright_nancy = np.where(radar_bright_nancy=='Yes')
################################################################################
file = '../crater_measurements/Rubanenko_mercury_data.csv'
dd_data_rub = np.loadtxt(file,dtype='str',delimiter=',',skiprows=1)
index_diam = np.where((dd_data_rub[:,3].astype(np.float)/1000.0 < max_diam) & (dd_data_rub[:,3].astype(np.float)/1000.0 > min_diam))
dd_data_rub = dd_data_rub[index_diam,:]
dd_data_rub = dd_data_rub[0,:,:]
depth_rub = dd_data_rub[:,2].astype(np.float)/1000.0
diameter_rub = dd_data_rub[:,3].astype(np.float)/1000.0
longitude_rub = dd_data_rub[:,1].astype(np.float)
latitude_rub = dd_data_rub[:,0].astype(np.float)
for k in range(0,len(longitude_rub)):
if longitude_rub[k] > 180:
longitude_rub[k]=longitude_rub[k]-360
################################################################################
###### finding radar-bright data _mla ##########################################
index_radar_bright_mla = np.where(radar_bright_mla=='Yes')
longitude_radar_bright_mla = longitude_mla[index_radar_bright_mla]
latitude_radar_bright_mla = latitude_mla[index_radar_bright_mla]
depth_radar_bright_mla = depth_mla[index_radar_bright_mla]
diameter_radar_bright_mla = diameter_mla[index_radar_bright_mla]
index_not_radar_bright_mla = np.where(radar_bright_mla!='Yes')
longitude_not_radar_bright_mla = longitude_mla[index_not_radar_bright_mla]
latitude_not_radar_bright_mla = latitude_mla[index_not_radar_bright_mla]
depth_not_radar_bright_mla = depth_mla[index_not_radar_bright_mla]
diameter_not_radar_bright_mla = diameter_mla[index_not_radar_bright_mla]
################################################################################
###### finding radar-bright data _nancy ##########################################
index_radar_bright_nancy = np.where(radar_bright_nancy=='Yes')
longitude_radar_bright_nancy = longitude_nancy[index_radar_bright_nancy]
latitude_radar_bright_nancy = latitude_nancy[index_radar_bright_nancy]
depth_radar_bright_nancy = depth_nancy[index_radar_bright_nancy]
diameter_radar_bright_nancy = diameter_nancy[index_radar_bright_nancy]
index_not_radar_bright_nancy = np.where(radar_bright_nancy!='Yes')
longitude_not_radar_bright_nancy = longitude_nancy[index_not_radar_bright_nancy]
latitude_not_radar_bright_nancy = latitude_nancy[index_not_radar_bright_nancy]
depth_not_radar_bright_nancy = depth_nancy[index_not_radar_bright_nancy]
diameter_not_radar_bright_nancy = diameter_nancy[index_not_radar_bright_nancy]
################################################################################
###### binning data in longitude bins _mla ##########################################
total_lon_bins_mla = int(360/longitude_bin_size)
middle_bins_lon_mla = (np.arange(total_lon_bins_mla)*longitude_bin_size)+(longitude_bin_size/2.0)-(180+(longitude_bin_size/2.0))
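#Each running bin is centred on middle_bins_lon_mla[i] and admits longitudes within
#+/- longitude_bin_size of the centre: 30-degree windows stepped every 15 degrees,
#so consecutive bins overlap by half a window.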
mean_dd_bin_mla = np.empty(total_lon_bins_mla)
mean_dd_bin_radar_bright_mla = np.empty(total_lon_bins_mla)
mean_dd_bin_not_radar_bright_mla = np.empty(total_lon_bins_mla)
mean_dd_bin_rub = np.empty(total_lon_bins_mla)
median_dd_bin_mla = np.empty(total_lon_bins_mla)
median_dd_bin_radar_bright_mla = np.empty(total_lon_bins_mla)
median_dd_bin_not_radar_bright_mla = np.empty(total_lon_bins_mla)
median_dd_bin_rub = np.empty(total_lon_bins_mla)
std_dd_bin_mla = np.empty(total_lon_bins_mla)
std_dd_bin_radar_bright_mla = np.empty(total_lon_bins_mla)
std_dd_bin_not_radar_bright_mla = np.empty(total_lon_bins_mla)
std_dd_bin_rub = np.empty(total_lon_bins_mla)
count_dd_bin_mla = np.empty(total_lon_bins_mla)
count_dd_bin_radar_bright_mla = np.empty(total_lon_bins_mla)
count_dd_bin_not_radar_bright_mla = np.empty(total_lon_bins_mla)
count_dd_bin_rub = np.empty(total_lon_bins_mla)
for i in range(0,total_lon_bins_mla):
index_lon_bin_mla = np.where((longitude_mla>(middle_bins_lon_mla[i]-longitude_bin_size)) & (longitude_mla<(middle_bins_lon_mla[i]+longitude_bin_size)))
mean_dd_bin_mla[i] = np.mean(depth_mla[index_lon_bin_mla]/diameter_mla[index_lon_bin_mla])
median_dd_bin_mla[i] = np.median(depth_mla[index_lon_bin_mla]/diameter_mla[index_lon_bin_mla])
std_dd_bin_mla[i] = np.std(depth_mla[index_lon_bin_mla]/diameter_mla[index_lon_bin_mla])
count_dd_bin_mla[i] = len(depth_mla[index_lon_bin_mla]/diameter_mla[index_lon_bin_mla])
index_lon_bin_radar_bright_mla = np.where((longitude_radar_bright_mla>(middle_bins_lon_mla[i]-longitude_bin_size)) & (longitude_radar_bright_mla<(middle_bins_lon_mla[i]+longitude_bin_size)))
mean_dd_bin_radar_bright_mla[i] = np.mean(depth_radar_bright_mla[index_lon_bin_radar_bright_mla]/diameter_radar_bright_mla[index_lon_bin_radar_bright_mla])
median_dd_bin_radar_bright_mla[i] = np.median(depth_radar_bright_mla[index_lon_bin_radar_bright_mla]/diameter_radar_bright_mla[index_lon_bin_radar_bright_mla])
std_dd_bin_radar_bright_mla[i] = np.std(depth_radar_bright_mla[index_lon_bin_radar_bright_mla]/diameter_radar_bright_mla[index_lon_bin_radar_bright_mla])
count_dd_bin_radar_bright_mla[i] = len(depth_radar_bright_mla[index_lon_bin_radar_bright_mla]/diameter_radar_bright_mla[index_lon_bin_radar_bright_mla])
index_lon_bin_not_radar_bright_mla = np.where((longitude_not_radar_bright_mla>(middle_bins_lon_mla[i]-longitude_bin_size)) & (longitude_not_radar_bright_mla<(middle_bins_lon_mla[i]+longitude_bin_size)))
mean_dd_bin_not_radar_bright_mla[i] = np.mean(depth_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla]/diameter_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla])
median_dd_bin_not_radar_bright_mla[i] = np.median(depth_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla]/diameter_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla])
std_dd_bin_not_radar_bright_mla[i] = np.std(depth_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla]/diameter_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla])
count_dd_bin_not_radar_bright_mla[i] = len(depth_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla]/diameter_not_radar_bright_mla[index_lon_bin_not_radar_bright_mla])
index_lon_bin_rub = np.where((longitude_rub>(middle_bins_lon_mla[i]-longitude_bin_size)) & (longitude_rub<(middle_bins_lon_mla[i]+longitude_bin_size)))
	mean_dd_bin_rub[i] = np.mean(depth_rub[index_lon_bin_rub]/diameter_rub[index_lon_bin_rub])
	median_dd_bin_rub[i] = np.median(depth_rub[index_lon_bin_rub]/diameter_rub[index_lon_bin_rub])
	std_dd_bin_rub[i] = np.std(depth_rub[index_lon_bin_rub]/diameter_rub[index_lon_bin_rub])
	count_dd_bin_rub[i] = len(depth_rub[index_lon_bin_rub]/diameter_rub[index_lon_bin_rub])
################################################################################
###### binning data in longitude bins _nancy ##########################################
total_lon_bins_nancy = int(360/longitude_bin_size)
middle_bins_lon_nancy = (np.arange(total_lon_bins_nancy)*longitude_bin_size)+(longitude_bin_size/2.0)-(180+(longitude_bin_size/2.0))
mean_dd_bin_nancy = np.empty(total_lon_bins_nancy)
mean_dd_bin_radar_bright_nancy = np.empty(total_lon_bins_nancy)
mean_dd_bin_not_radar_bright_nancy = np.empty(total_lon_bins_nancy)
median_dd_bin_nancy = np.empty(total_lon_bins_nancy)
median_dd_bin_radar_bright_nancy = np.empty(total_lon_bins_nancy)
median_dd_bin_not_radar_bright_nancy = np.empty(total_lon_bins_nancy)
std_dd_bin_nancy = np.empty(total_lon_bins_nancy)
std_dd_bin_radar_bright_nancy = np.empty(total_lon_bins_nancy)
std_dd_bin_not_radar_bright_nancy = np.empty(total_lon_bins_nancy)
count_dd_bin_nancy = np.empty(total_lon_bins_nancy)
count_dd_bin_radar_bright_nancy = np.empty(total_lon_bins_nancy)
count_dd_bin_not_radar_bright_nancy = np.empty(total_lon_bins_nancy)
for i in range(0,total_lon_bins_nancy):
print(i*longitude_bin_size)
print((i+1)*longitude_bin_size)
	index_lon_bin_nancy = np.where((longitude_nancy>(middle_bins_lon_nancy[i]-longitude_bin_size)) & (longitude_nancy<(middle_bins_lon_nancy[i]+longitude_bin_size)))
mean_dd_bin_nancy[i] = np.mean(depth_nancy[index_lon_bin_nancy]/diameter_nancy[index_lon_bin_nancy])
median_dd_bin_nancy[i] = np.median(depth_nancy[index_lon_bin_nancy]/diameter_nancy[index_lon_bin_nancy])
std_dd_bin_nancy[i] = np.std(depth_nancy[index_lon_bin_nancy]/diameter_nancy[index_lon_bin_nancy])
count_dd_bin_nancy[i] = len(depth_nancy[index_lon_bin_nancy]/diameter_nancy[index_lon_bin_nancy])
	index_lon_bin_radar_bright_nancy = np.where((longitude_radar_bright_nancy>(middle_bins_lon_nancy[i]-longitude_bin_size)) & (longitude_radar_bright_nancy<(middle_bins_lon_nancy[i]+longitude_bin_size)))
mean_dd_bin_radar_bright_nancy[i] = np.mean(depth_radar_bright_nancy[index_lon_bin_radar_bright_nancy]/diameter_radar_bright_nancy[index_lon_bin_radar_bright_nancy])
median_dd_bin_radar_bright_nancy[i] = np.median(depth_radar_bright_nancy[index_lon_bin_radar_bright_nancy]/diameter_radar_bright_nancy[index_lon_bin_radar_bright_nancy])
std_dd_bin_radar_bright_nancy[i] = np.std(depth_radar_bright_nancy[index_lon_bin_radar_bright_nancy]/diameter_radar_bright_nancy[index_lon_bin_radar_bright_nancy])
count_dd_bin_radar_bright_nancy[i] = len(depth_radar_bright_nancy[index_lon_bin_radar_bright_nancy]/diameter_radar_bright_nancy[index_lon_bin_radar_bright_nancy])
	index_lon_bin_not_radar_bright_nancy = np.where((longitude_not_radar_bright_nancy>(middle_bins_lon_nancy[i]-longitude_bin_size)) & (longitude_not_radar_bright_nancy<(middle_bins_lon_nancy[i]+longitude_bin_size)))
mean_dd_bin_not_radar_bright_nancy[i] = np.mean(depth_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy]/diameter_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy])
median_dd_bin_not_radar_bright_nancy[i] = np.median(depth_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy]/diameter_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy])
std_dd_bin_not_radar_bright_nancy[i] = np.std(depth_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy]/diameter_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy])
count_dd_bin_not_radar_bright_nancy[i] = len(depth_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy]/diameter_not_radar_bright_nancy[index_lon_bin_not_radar_bright_nancy])
print(count_dd_bin_not_radar_bright_nancy[i])
print(count_dd_bin_not_radar_bright_mla[i])
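# --- Sketch (not part of the original script): the per-bin loops above could be
# vectorized with scipy.stats.binned_statistic, assuming plain non-overlapping
# bins of width longitude_bin_size were acceptable (the loops above use
# overlapping running windows of twice that width). Function name is illustrative.
def bin_dd_by_longitude(longitude, depth, diameter, bin_size):
    from scipy import stats  # local import so the sketch is self-contained
    edges = np.arange(-180.0, 180.0 + bin_size, bin_size)
    dd = depth / diameter
    mean, _, _ = stats.binned_statistic(longitude, dd, statistic='mean', bins=edges)
    median, _, _ = stats.binned_statistic(longitude, dd, statistic='median', bins=edges)
    std, _, _ = stats.binned_statistic(longitude, dd, statistic='std', bins=edges)
    count, _, _ = stats.binned_statistic(longitude, dd, statistic='count', bins=edges)
    centres = (edges[:-1] + edges[1:]) / 2.0
    return centres, mean, median, std, count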
################################################################################
###### Matplotlib formatting ######################################################
tfont = {'family' : 'Times New Roman',
'size' : 18}
mpl.rc('font',**tfont)
###### mean d/D versus binned longitude -180 to 180 _mla####################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,mean_dd_bin_mla,'ko',label='All craters')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_mla, yerr=std_dd_bin_mla,fmt='ko',capsize=5)
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_mla.pdf',format='pdf')
plt.close('all')
################################################################################
###### mean d/D versus longitude -180 to 180 _mla ###################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,mean_dd_bin_radar_bright_mla,'ko',label='MLA Non-radar-bright craters')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_radar_bright_mla, yerr=std_dd_bin_radar_bright_mla,fmt='ko',capsize=5)
ax.plot(middle_bins_lon_mla,mean_dd_bin_not_radar_bright_mla,'b^',label='MLA Radar-bright craters')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_not_radar_bright_mla, yerr=std_dd_bin_not_radar_bright_mla,fmt='b^',capsize=5)
ax.plot([-180,180],[0.2,0.2],':ko')
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':14})
ax.text(0, 0.202, 'depth = 0.2 diameter',size=12)
ax.set_ylim(0.05,0.25)
ax.set_xlim(-40,130)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_mla.pdf',format='pdf')
plt.close('all')
################################################################################
###### median d/D versus longitude -180 to 180 _mla ####################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,median_dd_bin_radar_bright_mla,'ko',label='Non-radar-bright craters')
ax.errorbar(middle_bins_lon_mla,median_dd_bin_radar_bright_mla, yerr=std_dd_bin_radar_bright_mla,fmt='ko',capsize=5)
ax.plot(middle_bins_lon_mla,median_dd_bin_not_radar_bright_mla,'b^',label='Radar-bright craters')
ax.errorbar(middle_bins_lon_mla,median_dd_bin_not_radar_bright_mla, yerr=std_dd_bin_not_radar_bright_mla,fmt='b^',capsize=5)
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Median depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'mediandD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_mla.pdf',format='pdf')
plt.close('all')
################################################################################
###### count d/D versus longitude -180 to 180 _mla####################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,count_dd_bin_radar_bright_mla,'ko',label='Non-radar-bright craters')
ax.plot(middle_bins_lon_mla,count_dd_bin_not_radar_bright_mla,'bo',label='Radar-bright craters')
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Number of craters measured')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'countD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_mla.pdf',format='pdf')
plt.close('all')
################################################################################
###### percentage radar-bright versus longitude -180 to 180 _mla####################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,((count_dd_bin_radar_bright_mla/(count_dd_bin_not_radar_bright_mla+count_dd_bin_radar_bright_mla))*100),'ko',label='Percentage measured radar-bright')
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('% of measured craters that are radar-bright')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
plt.tight_layout()
plt.savefig(export_location+'percentage_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_mla.pdf',format='pdf')
plt.close('all')
###### mean d/D versus binned longitude -180 to 180 _nancy####################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,mean_dd_bin_nancy,'ko',label='All craters')
ax.errorbar(middle_bins_lon_nancy,mean_dd_bin_nancy, yerr=std_dd_bin_nancy,fmt='ko',capsize=5)
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### mean d/D versus longitude -180 to 180 _nancy ###################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,mean_dd_bin_radar_bright_nancy,'ro',label='Gridded Non-radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_nancy,mean_dd_bin_radar_bright_nancy, yerr=std_dd_bin_radar_bright_nancy,fmt='ro',capsize=5,alpha=0.5)
ax.plot(middle_bins_lon_nancy,mean_dd_bin_not_radar_bright_nancy,'m^',label='Gridded Radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_nancy,mean_dd_bin_not_radar_bright_nancy, yerr=std_dd_bin_not_radar_bright_nancy,fmt='m^',capsize=5,alpha=0.5)
ax.plot([-180,180],[0.2,0.2],':ko')
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':14})
ax.text(0, 0.202, 'depth = 0.2 diameter',size=12)
ax.set_ylim(0.05,0.25)
ax.set_xlim(-40,130)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### median d/D versus longitude -180 to 180 _nancy ####################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,median_dd_bin_radar_bright_nancy,'ko',label='Non-radar-bright craters')
ax.errorbar(middle_bins_lon_nancy,median_dd_bin_radar_bright_nancy, yerr=std_dd_bin_radar_bright_nancy,fmt='ko',capsize=5)
ax.plot(middle_bins_lon_nancy,median_dd_bin_not_radar_bright_nancy,'bo',label='Radar-bright craters')
ax.errorbar(middle_bins_lon_nancy,median_dd_bin_not_radar_bright_nancy, yerr=std_dd_bin_not_radar_bright_nancy,fmt='bo',capsize=5)
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Median depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'mediandD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### count d/D versus longitude -180 to 180 _nancy####################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,count_dd_bin_radar_bright_nancy,'ko',label='Non-radar-bright craters')
ax.plot(middle_bins_lon_nancy,count_dd_bin_not_radar_bright_nancy,'bo',label='Radar-bright craters')
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Number of craters measured')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':15})
plt.tight_layout()
plt.savefig(export_location+'countD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### percentage radar-bright versus longitude -180 to 180 _nancy####################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,((count_dd_bin_radar_bright_nancy/(count_dd_bin_not_radar_bright_nancy+count_dd_bin_radar_bright_nancy))*100),'ko',label='Percentage measured radar-bright')
ax.set_xlim(-31,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('% of measured craters that are radar-bright')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
plt.tight_layout()
plt.savefig(export_location+'percentage_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### mean d/D versus longitude -180 to 180 _mla _nancy###################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_nancy,mean_dd_bin_radar_bright_nancy,'ro',label='Gridded Non-radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_nancy,mean_dd_bin_radar_bright_nancy, yerr=std_dd_bin_radar_bright_nancy,fmt='ro',capsize=5,alpha=0.5)
ax.plot(middle_bins_lon_nancy,mean_dd_bin_not_radar_bright_nancy,'mo',label='Gridded Radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_nancy,mean_dd_bin_not_radar_bright_nancy, yerr=std_dd_bin_not_radar_bright_nancy,fmt='mo',capsize=5,alpha=0.5)
ax.plot(middle_bins_lon_mla,mean_dd_bin_radar_bright_mla,'ko',label='MLA Non-radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_radar_bright_mla, yerr=std_dd_bin_radar_bright_mla,fmt='ko',capsize=5,alpha=0.5)
ax.plot(middle_bins_lon_mla,mean_dd_bin_not_radar_bright_mla,'bo',label='MLA Radar-bright craters',alpha=0.5)
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_not_radar_bright_mla, yerr=std_dd_bin_not_radar_bright_mla,fmt='bo',capsize=5,alpha=0.5)
ax.plot([-180,180],[0.2,0.2],':ko')
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':14})
ax.text(0, 0.202, 'depth = 0.2 diameter',size=12)
ax.set_ylim(0.05,0.25)
ax.set_xlim(-40,130)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_binned_radarbright_v_nonradarbright_mla_nancy.pdf',format='pdf')
plt.close('all')
################################################################################
###### mean d/D versus longitude -180 to 180 _mla _nancy###################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,mean_dd_bin_mla,'ko',label='MLA track topography')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_mla, yerr=std_dd_bin_mla,fmt='ko',capsize=5)
ax.plot(middle_bins_lon_mla,mean_dd_bin_nancy,'ro',label='Gridded topography')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_nancy, yerr=std_dd_bin_nancy,fmt='ro',capsize=5)
ax.plot(middle_bins_lon_mla,mean_dd_bin_rub,'bo',label='Rubanenko et al., 2019')
ax.errorbar(middle_bins_lon_mla,mean_dd_bin_rub, yerr=std_dd_bin_rub,fmt='bo',capsize=5)
ax.set_ylim(0.06,0.2)
ax.set_xlim(-35,121)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Mean depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':12})
plt.tight_layout()
plt.savefig(export_location+'meandD_v_runbinned_longitude_v2_mla_nancy_rub.pdf',format='pdf')
plt.close('all')
################################################################################
###### median d/D versus longitude -180 to 180 _mla _nancy###################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(middle_bins_lon_mla,median_dd_bin_mla,'ko',label='MLA track topography')
ax.errorbar(middle_bins_lon_mla,median_dd_bin_mla, yerr=std_dd_bin_mla,fmt='ko',capsize=5)
ax.plot(middle_bins_lon_mla,median_dd_bin_nancy,'ro',label='Gridded topography')
ax.errorbar(middle_bins_lon_mla,median_dd_bin_nancy, yerr=std_dd_bin_nancy,fmt='ro',capsize=5)
ax.plot(middle_bins_lon_mla,median_dd_bin_rub,'bo',label='Rubanenko et al., 2019')
ax.errorbar(middle_bins_lon_mla,median_dd_bin_rub, yerr=std_dd_bin_rub,fmt='bo',capsize=5)
ax.set_xlim(-35,121)
ax.set_ylim(0.06,0.2)
ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
ax.set_ylabel('Median depth/diameter')
ax.set_xlabel('Longitude (degrees)')
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax.legend(prop={'size':12})
plt.tight_layout()
plt.savefig(export_location+'mediandD_v_runbinned_longitude_mla_nancy_rub.pdf',format='pdf')
plt.close('all')
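# --- Sketch (not part of the original script): the figure blocks above repeat
# the same scaffolding roughly ten times; a helper like this (name and defaults
# illustrative) would reduce each plot to a single call.
def dd_vs_longitude_plot(series, ylabel, filename, xlim=(-31, 121)):
    """series: iterable of (x, y, yerr, fmt, label) tuples; yerr may be None."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for x, y, yerr, fmt, label in series:
        ax.plot(x, y, fmt, label=label)
        if yerr is not None:
            ax.errorbar(x, y, yerr=yerr, fmt=fmt, capsize=5)
    ax.set_xlim(*xlim)
    ax.xaxis.set_major_locator(FixedLocator(np.arange(-30, 150, 30)))
    ax.set_ylabel(ylabel)
    ax.set_xlabel('Longitude (degrees)')
    ax.legend(prop={'size': 15})
    plt.tight_layout()
    plt.savefig(export_location + filename, format='pdf')
    plt.close('all')
# Example: the first mean-d/D figure above becomes
# dd_vs_longitude_plot([(middle_bins_lon_mla, mean_dd_bin_mla, std_dd_bin_mla, 'ko', 'All craters')],
#                      'Mean depth/diameter', 'meandD_v_runbinned_longitude_v2_mla.pdf')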
| 50.746269 | 212 | 0.742096 | 4,275 | 27,200 | 4.263158 | 0.049123 | 0.136406 | 0.077586 | 0.054102 | 0.91369 | 0.891248 | 0.856296 | 0.817449 | 0.79561 | 0.763841 | 0 | 0.02243 | 0.05261 | 27,200 | 535 | 213 | 50.841122 | 0.684815 | 0.040478 | 0 | 0.45 | 0 | 0 | 0.108465 | 0.043667 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.013889 | 0 | 0.013889 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e240f8d4c7d37f70aa462fa6abf5e545e2313227 | 40 | py | Python | wiki_search/dataset/__init__.py | WikiMegrez/wikisearch | 89dcd07962bacf0dc3cce55bf529b8af44e8150e | ["Apache-2.0"] | null | null | null | wiki_search/dataset/__init__.py | WikiMegrez/wikisearch | 89dcd07962bacf0dc3cce55bf529b8af44e8150e | ["Apache-2.0"] | null | null | null | wiki_search/dataset/__init__.py | WikiMegrez/wikisearch | 89dcd07962bacf0dc3cce55bf529b8af44e8150e | ["Apache-2.0"] | null | null | null |
from .dataset import Dataset, Document
| 13.333333 | 38 | 0.8 | 5 | 40 | 6.4 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15 | 40 | 2 | 39 | 20 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2c6833152550ccb91e9895b97f5563c2931c78f6 | 104 | py | Python | qapi/protocols/cryptography/key_distribution/exceptions.py | seunomonije/quantum-programming-api | b2d45cdbf13b8e4d3917d9bea6317898da71aa33 | ["Apache-2.0"] | 1 | 2021-03-13T20:59:17.000Z | 2021-03-13T20:59:17.000Z | qapi/protocols/cryptography/key_distribution/exceptions.py | yaleqc/quantum-programming-api | 9467cf89e138eab0ae08e7bb1a378338f7703a0a | ["Apache-2.0"] | null | null | null | qapi/protocols/cryptography/key_distribution/exceptions.py | yaleqc/quantum-programming-api | 9467cf89e138eab0ae08e7bb1a378338f7703a0a | ["Apache-2.0"] | 1 | 2021-01-10T04:19:05.000Z | 2021-01-10T04:19:05.000Z |
# Application-level errors should subclass Exception rather than BaseException,
# so that ordinary `except Exception` handlers can catch them.
class InvalidBitstringError(Exception):
    pass
class InvalidQuantumKeyError(Exception):
    pass
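# --- Illustrative usage (not part of the original module); the parsing logic
# below is hypothetical.
if __name__ == "__main__":
    def parse_key_bits(bits):
        if any(b not in "01" for b in bits):
            raise InvalidBitstringError("not a bitstring: %r" % (bits,))
        return [int(b) for b in bits]
    try:
        parse_key_bits("01x0")
    except InvalidBitstringError as exc:
        print("rejected: %s" % exc)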
| 17.333333 | 44 | 0.836538 | 8 | 104 | 10.875 | 0.625 | 0.390805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105769 | 104 | 5 | 45 | 20.8 | 0.935484 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
2c7d5fed6f3d1c89d2afce10eef18d6e78236d68 | 4,855 | py | Python | old_lambda/lambda_function/operations.py | jdkandersson/cloudformation-kubernetes | 8bd14379540bd2d122283c74166883e375cb348e | ["Apache-2.0"] | null | null | null | old_lambda/lambda_function/operations.py | jdkandersson/cloudformation-kubernetes | 8bd14379540bd2d122283c74166883e375cb348e | ["Apache-2.0"] | null | null | null | old_lambda/lambda_function/operations.py | jdkandersson/cloudformation-kubernetes | 8bd14379540bd2d122283c74166883e375cb348e | ["Apache-2.0"] | null | null | null |
"""Kubernetes operations."""
import typing
import kubernetes
from . import exceptions
from . import helpers
class CreateReturn(typing.NamedTuple):
"""
Structure of the create return value.
Attrs:
status: The status of the operation. Is SUCCESS or FAILURE.
reason: If the status is FAILURE, the reason for the failure.
physical_name: If the status is success, the physical name of the created
resource in the form [<namespace>/]<name> where the namespace is included
if the operation is namespaced.
"""
status: str
reason: typing.Optional[str]
physical_name: typing.Optional[str]
def create(*, body: typing.Dict[str, typing.Any]) -> CreateReturn:
"""
Execute create command.
Assume body has at least metadata with a name.
Args:
body: The body to create.
Returns:
Information about the outcome of the operation.
"""
try:
api_version = helpers.get_api_version(body=body)
kind = helpers.get_kind(body=body)
except exceptions.ParentError as exc:
return CreateReturn("FAILURE", str(exc), None)
client_function, namespaced = helpers.get_function(
api_version=api_version, kind=kind, operation="create"
)
# Handling non-namespaced cases
if not namespaced:
try:
response = client_function(body=body)
return CreateReturn("SUCCESS", None, response.metadata.name)
except kubernetes.client.rest.ApiException as exc:
return CreateReturn("FAILURE", str(exc), None)
# Handling namespaced
namespace = helpers.calculate_namespace(body=body)
try:
response = client_function(body=body, namespace=namespace)
return CreateReturn(
"SUCCESS", None, f"{response.metadata.namespace}/{response.metadata.name}"
)
except kubernetes.client.rest.ApiException as exc:
return CreateReturn("FAILURE", str(exc), None)
class ExistsReturn(typing.NamedTuple):
"""
    Structure of the update and delete return value.
Attrs:
status: The status of the operation. Is SUCCESS or FAILURE.
reason: If the status is FAILURE, the reason for the failure.
"""
status: str
reason: typing.Optional[str]
def update(*, body: typing.Dict[str, typing.Any], physical_name: str) -> ExistsReturn:
"""
Execute update command.
Assume body has at least metadata with a name.
Args:
body: The body to update.
physical_name: The namespace (if namespaced) and name of the resource.
Returns:
Information about the outcome of the operation.
"""
try:
api_version = helpers.get_api_version(body=body)
kind = helpers.get_kind(body=body)
except exceptions.ParentError as exc:
return ExistsReturn("FAILURE", str(exc))
client_function, namespaced = helpers.get_function(
api_version=api_version, kind=kind, operation="update"
)
# Handling non-namespaced cases
if not namespaced:
try:
client_function(body=body, name=physical_name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
# Handling namespaced
namespace, name = physical_name.split("/")
try:
client_function(body=body, namespace=namespace, name=name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
def delete(*, body: typing.Dict[str, typing.Any], physical_name: str) -> ExistsReturn:
"""
Execute delete command.
Assume body has at least metadata with a name.
Args:
body: The body to delete.
physical_name: The namespace (if namespaced) and name of the resource.
Returns:
Information about the outcome of the operation.
"""
try:
api_version = helpers.get_api_version(body=body)
kind = helpers.get_kind(body=body)
except exceptions.ParentError as exc:
return ExistsReturn("FAILURE", str(exc))
client_function, namespaced = helpers.get_function(
api_version=api_version, kind=kind, operation="delete"
)
# Handling non-namespaced cases
if not namespaced:
try:
client_function(name=physical_name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
# Handling namespaced
namespace, name = physical_name.split("/")
try:
client_function(namespace=namespace, name=name)
return ExistsReturn("SUCCESS", None)
except kubernetes.client.rest.ApiException as exc:
return ExistsReturn("FAILURE", str(exc))
| 29.969136 | 86 | 0.664264 | 568 | 4,855 | 5.605634 | 0.146127 | 0.037688 | 0.031093 | 0.048995 | 0.828518 | 0.807161 | 0.750942 | 0.750942 | 0.728015 | 0.728015 | 0 | 0 | 0.24552 | 4,855 | 161 | 87 | 30.15528 | 0.869233 | 0.285891 | 0 | 0.644737 | 0 | 0 | 0.054707 | 0.016504 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039474 | false | 0 | 0.052632 | 0 | 0.381579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2c81da921e001da3b682a7f055c4752416bfa10f | 38,211 | py | Python | bin/validate.py | sooshie/security-content | 3007fd2ac4743041f0e37151b17780ca8f094bbf | ["Apache-2.0"] | null | null | null | bin/validate.py | sooshie/security-content | 3007fd2ac4743041f0e37151b17780ca8f094bbf | ["Apache-2.0"] | null | null | null | bin/validate.py | sooshie/security-content | 3007fd2ac4743041f0e37151b17780ca8f094bbf | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
'''
Validates Manifest file under the security-content repo for correctness.
'''
import glob
import json
import jsonschema
import yaml
import sys
import argparse
from os import path
def validate_detection_contentv2(detection, DETECTION_UUIDS, errors, macros, lookups):
if detection['id'] == '':
errors.append('ERROR: Blank ID')
if detection['id'] in DETECTION_UUIDS:
errors.append('ERROR: Duplicate UUID found: %s' % detection['id'])
else:
DETECTION_UUIDS.append(detection['id'])
if detection['name'].endswith(" "):
errors.append(
"ERROR: Detection name has trailing spaces: '%s'" %
detection['name'])
try:
detection['description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if 'how_to_implement' in detection:
try:
detection['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in detection:
try:
detection['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: eli5 not ascii")
if 'known_false_positives' in detection:
try:
detection['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
# modded to pass validation for uba detections - not yet fleshed out
if 'splunk' in detection['detect']:
# do a regex match here instead of key values
# if (detection['detect']['splunk']['correlation_rule']['search'].find('tstats') != -1) or \
# (detection['detect']['splunk']['correlation_rule']['search'].find('datamodel') != -1):
if (detection['detect']['splunk']['correlation_rule']['search'].find('datamodel') != -1):
if 'data_models' not in detection['data_metadata']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' field is not set")
if not detection['data_metadata']['data_models']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' is empty")
# do a regex match here instead of key values
if (detection['detect']['splunk']['correlation_rule']['search'].find('sourcetype') != -1):
if 'data_sourcetypes' not in detection['data_metadata']:
errors.append("ERROR: The Splunk search specifies a sourcetype but 'data_sourcetypes' field is not set")
elif not detection['data_metadata']['data_sourcetypes']:
errors.append("ERROR: The Splunk search specifies a sourcetype but 'data_sourcetypes' is empty")
if 'macros' in detection['detect']['splunk']['correlation_rule']:
for macro in detection['detect']['splunk']['correlation_rule']['macros']:
if macro not in macros:
errors.append("ERROR: The Splunk search specifies a macro \"{}\" but there is no macro manifest for it".format(macro))
if 'lookups' in detection['detect']['splunk']['correlation_rule']:
for lookup in detection['detect']['splunk']['correlation_rule']['lookups']:
if lookup not in lookups:
errors.append("ERROR: The Splunk search specifies a lookup \"{}\" but there is no lookup manifest for it".format(lookup))
if 'notable' in detection['detect']['splunk']['correlation_rule']:
if ('drilldown_search' in detection['detect']['splunk']['correlation_rule']['notable']) ^ \
('drilldown_name' in detection['detect']['splunk']['correlation_rule']['notable']):
errors.append("ERROR: Both drilldown_search and drilldown_name must be defined")
elif 'uba' in detection['detect']:
if (detection['detect']['uba']['correlation_rule']['search'].find('tstats') != -1) or \
            (detection['detect']['uba']['correlation_rule']['search'].find('datamodel') != -1):
if 'data_models' not in detection['data_metadata']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' field is not set")
if not detection['data_metadata']['data_models']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' is empty")
# do a regex match here instead of key values
if (detection['detect']['uba']['correlation_rule']['search'].find('sourcetype') != -1):
if 'data_sourcetypes' not in detection['data_metadata']:
errors.append("ERROR: The Splunk search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
if not detection['data_metadata']['data_sourcetypes']:
errors.append("ERROR: The Splunk search specifies a sourcetype but \
'data_sourcetypes' is empty")
# do a regex match here instead of key values
return errors
def validate_investigation_contentv2(investigation, investigation_uuids, errors, macros, lookups):
if investigation['id'] == '':
errors.append('ERROR: Blank ID')
if investigation['id'] in investigation_uuids:
errors.append('ERROR: Duplicate UUID found: %s' % investigation['id'])
else:
investigation_uuids.append(investigation['id'])
if investigation['name'].endswith(" "):
errors.append(
"ERROR: Investigation name has trailing spaces: '%s'" %
investigation['name'])
try:
investigation['description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if 'how_to_implement' in investigation:
try:
investigation['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in investigation:
try:
investigation['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: eli5 not ascii")
if 'known_false_positives' in investigation:
try:
investigation['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
if 'splunk' in investigation['investigate']:
# do a regex match here instead of key values
if (investigation['investigate']['splunk']['search'].find('tstats') != -1) or \
(investigation['investigate']['splunk']['search'].find('datamodel') != -1):
if 'data_models' not in investigation['data_metadata']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' field is not set")
if not investigation['data_metadata']['data_models']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' is empty")
# do a regex match here instead of key values
if (investigation['investigate']['splunk']['search'].find('sourcetype') != -1):
if 'data_sourcetypes' not in investigation['data_metadata']:
errors.append("ERROR: The Splunk search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
if not investigation['data_metadata']['data_sourcetypes']:
errors.append("ERROR: The Splunk search specifies a sourcetype but \
'data_sourcetypes' is empty")
if 'macros' in investigation['investigate']['splunk']:
for macro in investigation['investigate']['splunk']['macros']:
if macro not in macros:
errors.append("ERROR: The Splunk search specifies a macro \"{}\" but there is no macro manifest for it".format(macro))
if 'lookups' in investigation['investigate']['splunk']:
for lookup in investigation['investigate']['splunk']['lookups']:
if lookup not in lookups:
errors.append("ERROR: The Splunk search specifies a lookup \"{}\" but there is no lookup manifest for it".format(lookup))
return errors
def validate_baselines_contentv2(baseline, baselines_uuids, errors, macros, lookups):
if baseline['id'] == '':
errors.append('ERROR: Blank ID')
if baseline['id'] in baselines_uuids:
errors.append('ERROR: Duplicate UUID found: %s' % baseline['id'])
else:
baselines_uuids.append(baseline['id'])
if baseline['name'].endswith(" "):
        errors.append(
            "ERROR: Baseline name has trailing spaces: '%s'" %
            baseline['name'])
try:
baseline['description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if 'how_to_implement' in baseline:
try:
baseline['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in baseline:
try:
baseline['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: eli5 not ascii")
if 'known_false_positives' in baseline:
try:
baseline['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
if 'splunk' in baseline['baseline']:
# do a regex match here instead of key values
if (baseline['baseline']['splunk']['search'].find('tstats') != -1) or \
(baseline['baseline']['splunk']['search'].find('datamodel') != -1):
if 'data_models' not in baseline['data_metadata']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' field is not set")
if not baseline['data_metadata']['data_models']:
errors.append("ERROR: The Splunk search uses a data model but 'data_models' is empty")
# do a regex match here instead of key values
if (baseline['baseline']['splunk']['search'].find('sourcetype') != -1):
if 'data_sourcetypes' not in baseline['data_metadata']:
errors.append("ERROR: The Splunk search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
if not baseline['data_metadata']['data_sourcetypes']:
errors.append("ERROR: The Splunk search specifies a sourcetype but \
'data_sourcetypes' is empty")
if 'macros' in baseline['baseline']['splunk']:
for macro in baseline['baseline']['splunk']['macros']:
if macro not in macros:
errors.append("ERROR: The Splunk search specifies a macro \"{}\" but there is no macro manifest for it".format(macro))
if 'lookups' in baseline['baseline']['splunk']:
for lookup in baseline['baseline']['splunk']['lookups']:
if lookup not in lookups:
errors.append("ERROR: The Splunk search specifies a lookup \"{}\" but there is no lookup manifest for it".format(lookup))
return errors
def validate_detection_contentv1(detection, DETECTION_UUIDS, errors):
try:
detection['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if detection['search_name'].endswith(" "):
errors.append(
"ERROR: Detection name has trailing spaces: '%s'" %
detection['search_name'])
if detection['search_id'] == '':
errors.append('ERROR: Blank ID')
if detection['search_id'] in DETECTION_UUIDS:
errors.append('ERROR: Duplicate UUID found: %s' % detection['search_id'])
else:
DETECTION_UUIDS.append(detection['search_id'])
if '| tstats' in detection['search'] or 'datamodel' in detection['search']:
if 'data_models' not in detection['data_metadata']:
errors.append(
"ERROR: The search uses a data model but 'data_models' \
field is not set")
        if 'data_models' in detection['data_metadata'] and not \
                detection['data_metadata']['data_models']:
errors.append(
"ERROR: The search uses a data model but 'data_models' is empty")
if 'sourcetype' in detection['search']:
if 'data_sourcetypes' not in detection['data_metadata']:
errors.append(
"ERROR: The search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
        if 'data_sourcetypes' in detection['data_metadata'] and not \
                detection['data_metadata']['data_sourcetypes']:
errors.append(
"ERROR: The search specifies a sourcetype but \
'data_sourcetypes' is empty")
try:
detection['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: search_description not ascii")
if 'how_to_implement' in detection:
try:
detection['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in detection:
try:
detection['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("eli5 not ascii")
if 'known_false_positives' in detection:
try:
detection['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
if 'correlation_rule' in detection and 'notable' in \
detection['correlation_rule']:
try:
detection['correlation_rule']['notable']['rule_title'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: rule_title not ascii")
try:
detection['correlation_rule']['notable']['rule_description'].encode(
'ascii')
except UnicodeEncodeError:
errors.append("ERROR: rule_description not ascii")
return errors
def validate_investigation_contentv1(investigation, investigation_uuids, errors):
try:
investigation['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if investigation['search_name'].endswith(" "):
errors.append(
"ERROR: Investigation name has trailing spaces: '%s'" %
investigation['search_name'])
if investigation['search_id'] == '':
errors.append('ERROR: Blank ID')
if investigation['search_id'] in investigation_uuids:
errors.append('ERROR: Duplicate UUID found: %s' % investigation['search_id'])
else:
investigation_uuids.append(investigation['search_id'])
if '| tstats' in investigation['search'] or 'datamodel' in investigation['search']:
if 'data_models' not in investigation['data_metadata']:
errors.append(
"ERROR: The search uses a data model but 'data_models' \
field is not set")
        if 'data_models' in investigation['data_metadata'] and not \
                investigation['data_metadata']['data_models']:
errors.append(
"ERROR: The search uses a data model but 'data_models' is empty")
if 'sourcetype' in investigation['search']:
if 'data_sourcetypes' not in investigation['data_metadata']:
errors.append(
"ERROR: The search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
        if 'data_sourcetypes' in investigation['data_metadata'] and not \
                investigation['data_metadata']['data_sourcetypes']:
errors.append(
"ERROR: The search specifies a sourcetype but \
'data_sourcetypes' is empty")
try:
investigation['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: search_description not ascii")
if 'how_to_implement' in investigation:
try:
investigation['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in investigation:
try:
investigation['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("eli5 not ascii")
if 'known_false_positives' in investigation:
try:
investigation['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
return errors
def validate_baselines_contentv1(baseline, baselines_uuids, errors):
try:
baseline['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
if baseline['search_name'].endswith(" "):
errors.append(
"ERROR: Baseline name has trailing spaces: '%s'" %
baseline['search_name'])
if baseline['search_id'] == '':
errors.append('ERROR: Blank ID')
if baseline['search_id'] in baselines_uuids:
errors.append('ERROR: Duplicate UUID found: %s' % baseline['search_id'])
else:
baselines_uuids.append(baseline['search_id'])
if '| tstats' in baseline['search'] or 'datamodel' in baseline['search']:
if 'data_models' not in baseline['data_metadata']:
errors.append(
"ERROR: The search uses a data model but 'data_models' \
field is not set")
        if 'data_models' in baseline['data_metadata'] and not \
                baseline['data_metadata']['data_models']:
errors.append(
"ERROR: The search uses a data model but 'data_models' is empty")
if 'sourcetype' in baseline['search']:
if 'data_sourcetypes' not in baseline['data_metadata']:
errors.append(
"ERROR: The search specifies a sourcetype but 'data_sourcetypes' \
field is not set")
        if 'data_sourcetypes' in baseline['data_metadata'] and not \
                baseline['data_metadata']['data_sourcetypes']:
errors.append(
"ERROR: The search specifies a sourcetype but \
'data_sourcetypes' is empty")
try:
baseline['search_description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: search_description not ascii")
if 'how_to_implement' in baseline:
try:
baseline['how_to_implement'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: how_to_implement not ascii")
if 'eli5' in baseline:
try:
baseline['eli5'].encode('ascii')
except UnicodeEncodeError:
errors.append("eli5 not ascii")
if 'known_false_positives' in baseline:
try:
baseline['known_false_positives'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: known_false_positives not ascii")
return errors
def validate_investigation_content(investigation, investigation_uuids, macros, lookups):
'''Validate that the content of a investigation manifest is correct'''
errors = []
# run v1 content validation
if investigation["spec_version"] == 1:
errors = validate_investigation_contentv1(investigation, investigation_uuids, errors)
if investigation["spec_version"] == 2:
errors = validate_investigation_contentv2(investigation, investigation_uuids, errors, macros, lookups)
return errors
def validate_detection_content(detection, DETECTION_UUIDS, macros, lookups):
'''Validate that the content of a detection manifest is correct'''
errors = []
# run v1 content validation
if detection["spec_version"] == 1:
errors = validate_detection_contentv1(detection, DETECTION_UUIDS, errors)
if detection["spec_version"] == 2:
errors = validate_detection_contentv2(detection, DETECTION_UUIDS, errors, macros, lookups)
return errors
def validate_story_content(story, STORY_UUIDS):
''' Validate that the content of a story manifest is correct'''
errors = []
if story['id'] == '':
errors.append('ERROR: Blank ID')
if story['id'] in STORY_UUIDS:
errors.append('ERROR: Duplicate UUID found: %s' % story['id'])
else:
STORY_UUIDS.append(story['id'])
try:
story['description'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: description not ascii")
try:
story['narrative'].encode('ascii')
except UnicodeEncodeError:
errors.append("ERROR: narrative not ascii")
return errors
def validate_baselines_content(baseline, baselines_uuids, macros, lookups):
'''Validate that the content of a baseline manifest is correct'''
errors = []
# run v1 content validation
if baseline["spec_version"] == 1:
errors = validate_baselines_contentv1(baseline, baselines_uuids, errors)
if baseline["spec_version"] == 2:
errors = validate_baselines_contentv2(baseline, baselines_uuids, errors, macros, lookups)
return errors
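# --- Sketch (not part of the original script): the schema-load try/except is
# repeated in every validate_* function below; a helper such as this (name
# illustrative) could factor it out. Written for Python 2 to match the script.
def load_schema(repo_path, relative_path):
    schema_file = path.join(path.expanduser(repo_path), relative_path)
    try:
        return json.loads(open(schema_file, 'rb').read())
    except IOError:
        print "ERROR: reading schema file {0}".format(schema_file)
    except ValueError:
        print "ERROR: File is not proper JSON {0}".format(schema_file)
    return None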
def validate_investigation(REPO_PATH, verbose, macros, lookups):
''' Validates Investigation'''
INVESTIGATION_UUIDS = []
    # retrieve
v1_schema_file_investigative = path.join(path.expanduser(REPO_PATH), 'spec/v1/investigative_search.json.spec')
try:
v1_schema_investigative = json.loads(open(v1_schema_file_investigative, 'rb').read())
except IOError:
print "ERROR: reading version 1 investigations schema file {0}".format(v1_schema_file_investigative)
v1_schema_file_contexual = path.join(path.expanduser(REPO_PATH), 'spec/v1/contextual_search.json.spec')
try:
v1_schema_contexual = json.loads(open(v1_schema_file_contexual, 'rb').read())
except IOError:
print "ERROR: reading version 1 investigations schema file {0}".format(v1_schema_file_contexual)
v2_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/investigations.spec.json')
try:
v2_schema = json.loads(open(v2_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 2 investigations schema file {0}".format(v2_schema_file)
error = False
manifest_files = path.join(path.expanduser(REPO_PATH), "investigations/*.yml")
for manifest_file in glob.glob(manifest_files):
if verbose:
print "processing investigation {0}".format(manifest_file)
# read in each investigation
with open(manifest_file, 'r') as stream:
try:
investigation = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(manifest_file)
error = True
continue
# validate v1 and v2 stories against spec for both investigations and old contexual searches
if investigation['spec_version'] == 1 and investigation['search_type'] == "contextual":
try:
jsonschema.validate(instance=investigation, schema=v1_schema_contexual)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
elif investigation['spec_version'] == 1 and investigation['search_type'] == "investigative":
try:
jsonschema.validate(instance=investigation, schema=v1_schema_investigative)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
elif investigation['spec_version'] == 2:
try:
jsonschema.validate(instance=investigation, schema=v2_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
else:
print "ERROR: Story {0} does not contain a spec_version which is required".format(manifest_file)
error = True
continue
# now lets validate the content
investigation_errors = validate_investigation_content(investigation, INVESTIGATION_UUIDS, macros, lookups)
if investigation_errors:
error = True
for err in investigation_errors:
print "{0} at:\n\t {1}".format(err, manifest_file)
return error
def validate_detection(REPO_PATH, verbose, macros, lookups):
''' Validates Detections'''
DETECTION_UUIDS = []
    # retrieve
v1_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v1/detection_search.json.spec')
try:
v1_schema = json.loads(open(v1_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 1 detection schema file {0}".format(v1_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v1_schema_file)
v2_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/detections.spec.json')
try:
v2_schema = json.loads(open(v2_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 2 detection schema file {0}".format(v2_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v2_schema_file)
error = False
manifest_files = path.join(path.expanduser(REPO_PATH), "detections/*.yml")
for manifest_file in glob.glob(manifest_files):
if verbose:
print "processing detection {0}".format(manifest_file)
# read in each detection
with open(manifest_file, 'r') as stream:
try:
detection = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(manifest_file)
error = True
continue
# validate v1 and v2 stories against spec
if detection['spec_version'] == 1:
try:
jsonschema.validate(instance=detection, schema=v1_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
elif detection['spec_version'] == 2:
try:
jsonschema.validate(instance=detection, schema=v2_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
else:
print "ERROR: Story {0} does not contain a spec_version which is required".format(manifest_file)
error = True
continue
# now lets validate the content
detection_errors = validate_detection_content(detection, DETECTION_UUIDS, macros, lookups)
if detection_errors:
error = True
for err in detection_errors:
print "{0} at:\n\t {1}".format(err, manifest_file)
return error
def validate_story(REPO_PATH, verbose):
''' Validates Stories'''
STORY_UUIDS = []
    # retrieve
v1_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v1/analytic_story.json.spec')
try:
v1_schema = json.loads(open(v1_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 1 story schema file {0}".format(v1_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v1_schema_file)
v2_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/story.spec.json')
try:
v2_schema = json.loads(open(v2_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 2 story schema file {0}".format(v2_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v2_schema_file)
error = False
story_manifest_files = path.join(path.expanduser(REPO_PATH), "stories/*.yml")
for story_manifest_file in glob.glob(story_manifest_files):
if verbose:
print "processing story {0}".format(story_manifest_file)
# read in each story
with open(story_manifest_file, 'r') as stream:
try:
story = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(story_manifest_file)
error = True
continue
# validate v1 and v2 stories against spec
if story['spec_version'] == 1:
try:
jsonschema.validate(instance=story, schema=v1_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), story_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
elif story['spec_version'] == 2:
try:
jsonschema.validate(instance=story, schema=v2_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), story_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
else:
print "ERROR: Story {0} does not contain a spec_version which is required".format(story_manifest_file)
error = True
continue
# now lets validate the content
story_errors = validate_story_content(story, STORY_UUIDS)
if story_errors:
error = True
for err in story_errors:
print "{0} at:\n\t {1}".format(err, story_manifest_file)
return error
def validate_baselines(REPO_PATH, verbose, macros, lookups):
''' Validates Baselines'''
BASELINE_UUIDS = []
    # retrieve
v1_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v1/support_search.json.spec')
try:
v1_schema = json.loads(open(v1_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 1 baseline schema file {0}".format(v1_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v1_schema_file)
v2_schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/baselines.spec.json')
try:
v2_schema = json.loads(open(v2_schema_file, 'rb').read())
except IOError:
print "ERROR: reading version 2 baseline schema file {0}".format(v2_schema_file)
except ValueError:
print "ERROR: File is not proper JSON {0}".format(v2_schema_file)
error = False
baselines_manifest_files = path.join(path.expanduser(REPO_PATH), "baselines/*.yml")
for baselines_manifest_file in glob.glob(baselines_manifest_files):
if verbose:
print "processing baseline {0}".format(baselines_manifest_file)
# read in each baseline
with open(baselines_manifest_file, 'r') as stream:
try:
baseline = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(baselines_manifest_file)
error = True
continue
# validate v1 and v2 stories against spec
if baseline['spec_version'] == 1:
try:
jsonschema.validate(instance=baseline, schema=v1_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), baselines_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
elif baseline['spec_version'] == 2:
try:
jsonschema.validate(instance=baseline, schema=v2_schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), baselines_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
else:
print "ERROR: Baseline {0} does not contain a spec_version which is required".format(baselines_manifest_file)
error = True
continue
# now lets validate the content
baselines_errors = validate_baselines_content(baseline, BASELINE_UUIDS, macros, lookups)
if baselines_errors:
error = True
for err in baselines_errors:
print "{0} at:\n\t {1}".format(err, baselines_manifest_file)
return error
def validate_macros(REPO_PATH, verbose):
''' Validates Macros'''
error = False
schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/macros.spec.json')
schema = json.loads(open(schema_file, 'rb').read())
macro_manifests = {}
macros_manifest_files = path.join(path.expanduser(REPO_PATH), "macros/*.yml")
for macros_manifest_file in glob.glob(macros_manifest_files):
if verbose:
print "processing macro {0}".format(macros_manifest_file)
# read in each macro
with open(macros_manifest_file, 'r') as stream:
try:
macro = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(macros_manifest_file)
error = True
continue
try:
jsonschema.validate(instance=macro, schema=schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), macros_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
macro_manifests[macro['name']] = macro
return error, macro_manifests
def validate_lookups(REPO_PATH, verbose):
''' Validates Lookups'''
error = False
schema_file = path.join(path.expanduser(REPO_PATH), 'spec/v2/lookups.spec.json')
schema = json.loads(open(schema_file, 'rb').read())
lookup_manifests = {}
lookups_manifest_files = path.join(path.expanduser(REPO_PATH), "lookups/*.yml")
for lookups_manifest_file in glob.glob(lookups_manifest_files):
if verbose:
print "processing lookup {0}".format(lookups_manifest_file)
# read in each lookup
with open(lookups_manifest_file, 'r') as stream:
try:
lookup = list(yaml.safe_load_all(stream))[0]
except yaml.YAMLError as exc:
print(exc)
print "Error reading {0}".format(lookups_manifest_file)
error = True
continue
try:
jsonschema.validate(instance=lookup, schema=schema)
except jsonschema.exceptions.ValidationError as json_ve:
print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message), lookups_manifest_file)
print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
error = True
if 'filename' in lookup:
lookup_csv_file = path.join(path.expanduser(REPO_PATH), "lookups/%s" % lookup['filename'])
if not path.isfile(lookup_csv_file):
print "ERROR: filename {} does not exist".format(lookup['filename'])
print lookup_csv_file
print "\t{}".format(lookups_manifest_file)
error = True
lookup_manifests[lookup['name']] = lookup
return error, lookup_manifests
if __name__ == "__main__":
# grab arguments
parser = argparse.ArgumentParser(description="validates security content manifest files", epilog="""
Validates security manifest for correctness, adhering to spec and other common items.
VALIDATE DOES NOT PROCESS RESPONSES SPEC for the moment.""")
parser.add_argument("-p", "--path", required=True, help="path to security-security content repo")
parser.add_argument("-v", "--verbose", required=False, action='store_true', help="prints verbose output")
# parse them
args = parser.parse_args()
REPO_PATH = args.path
verbose = args.verbose
macros_error, macros = validate_macros(REPO_PATH, verbose)
lookups_error, lookups = validate_lookups(REPO_PATH, verbose)
story_error = validate_story(REPO_PATH, verbose)
detection_error = validate_detection(REPO_PATH, verbose, macros, lookups)
investigation_error = validate_investigation(REPO_PATH, verbose, macros, lookups)
baseline_error = validate_baselines(REPO_PATH, verbose, macros, lookups)
if story_error:
sys.exit("Errors found")
elif detection_error:
sys.exit("Errors found")
elif investigation_error:
sys.exit("Errors found")
elif baseline_error:
sys.exit("Errors found")
elif macros_error:
sys.exit("Errors found")
elif lookups_error:
sys.exit("Errors found")
else:
print "No Errors found"
| 39.886221
| 141
| 0.630211
| 4,362
| 38,211
| 5.372994
| 0.051123
| 0.044033
| 0.060204
| 0.029014
| 0.868456
| 0.827751
| 0.768187
| 0.711482
| 0.656441
| 0.619747
| 0
| 0.007324
| 0.26037
| 38,211
| 957
| 142
| 39.9279
| 0.821952
| 0.031849
| 0
| 0.60745
| 0
| 0
| 0.248924
| 0.019418
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.010029
| null | null | 0.097421
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 6 |
2c9e38301a32acb2dc6159e9441fa4c1000f2d7e | 140 | py | Python | my_submission/model/__init__.py | abcdcamey/Gobigger-Explore | 75864164f3e45176a652154147740c34905d1958 | ["Apache-2.0"] | 1 | 2021-12-28T02:47:07.000Z | 2021-12-28T02:47:07.000Z | my_submission/model/__init__.py | abcdcamey/Gobigger-Explore | 75864164f3e45176a652154147740c34905d1958 | ["Apache-2.0"] | null | null | null | my_submission/model/__init__.py | abcdcamey/Gobigger-Explore | 75864164f3e45176a652154147740c34905d1958 | ["Apache-2.0"] | null | null | null |
from .gobigger_structed_simple_model import GoBiggerHybridActionSimpleV3
from .my_gobigger_structed_model_v1 import MyGoBiggerHybridActionV1
| 70
| 72
| 0.935714
| 15
| 140
| 8.266667
| 0.666667
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022556
| 0.05
| 140
| 2
| 73
| 70
| 0.909774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0 | 6 |
e2bc3af3fad8262e4f0cc2a8d1f846408ae0a6c0 | 21 | py | Python | datasets/__init__.py | zack466/autoreg-sr | 88146370c04bc299c0f4fa3a43d9dbc237bb102c | ["BSD-3-Clause"] | null | null | null | datasets/__init__.py | zack466/autoreg-sr | 88146370c04bc299c0f4fa3a43d9dbc237bb102c | ["BSD-3-Clause"] | null | null | null | datasets/__init__.py | zack466/autoreg-sr | 88146370c04bc299c0f4fa3a43d9dbc237bb102c | ["BSD-3-Clause"] | null | null | null |
from .div2k import *
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.190476
| 21
| 1
| 21
| 21
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0 | 6 |
e2d1c534677315f6466e246bf5f311b6dc6c8b9a | 3,978 | py | Python | home/migrations/0046_auto_20190905_0939.py | davidjrichardson/toucans | 7446b78ec2a09ff90eb83d4a78638c909deb06e1 | ["MIT"] | 1 | 2020-04-20T05:37:09.000Z | 2020-04-20T05:37:09.000Z | home/migrations/0046_auto_20190905_0939.py | davidjrichardson/toucans | 7446b78ec2a09ff90eb83d4a78638c909deb06e1 | ["MIT"] | 23 | 2019-03-13T10:54:36.000Z | 2022-03-11T23:33:59.000Z | home/migrations/0046_auto_20190905_0939.py | davidjrichardson/toucans | 7446b78ec2a09ff90eb83d4a78638c909deb06e1 | ["MIT"] | null | null | null |
# Generated by Django 2.2.5 on 2019-09-05 09:39
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('home', '0045_auto_20190409_1450'),
    ]

    operations = [
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='bb_black_score',
            field=models.IntegerField(verbose_name='BB Black'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='bb_blue_score',
            field=models.IntegerField(verbose_name='BB Blue'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='bb_gold_score',
            field=models.IntegerField(verbose_name='BB Gold'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='bb_red_score',
            field=models.IntegerField(verbose_name='BB Red'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='bb_white_score',
            field=models.IntegerField(verbose_name='BB White'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='cb_black_score',
            field=models.IntegerField(verbose_name='CP Black'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='cb_blue_score',
            field=models.IntegerField(verbose_name='CP Blue'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='cb_gold_score',
            field=models.IntegerField(verbose_name='CP Gold'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='cb_red_score',
            field=models.IntegerField(verbose_name='CP Red'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='cb_white_score',
            field=models.IntegerField(verbose_name='CP White'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='lb_black_score',
            field=models.IntegerField(verbose_name='LB Black'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='lb_blue_score',
            field=models.IntegerField(verbose_name='LB Blue'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='lb_gold_score',
            field=models.IntegerField(verbose_name='LB Gold'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='lb_red_score',
            field=models.IntegerField(verbose_name='LB Red'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='lb_white_score',
            field=models.IntegerField(verbose_name='LB White'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='rc_black_score',
            field=models.IntegerField(verbose_name='RC Black'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='rc_blue_score',
            field=models.IntegerField(verbose_name='RC Blue'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='rc_gold_score',
            field=models.IntegerField(verbose_name='RC Gold'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='rc_red_score',
            field=models.IntegerField(verbose_name='RC Red'),
        ),
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='rc_white_score',
            field=models.IntegerField(verbose_name='RC White'),
        ),
    ]
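
As an aside, the twenty AlterField operations follow a strict naming grid (bb/cb/lb/rc crossed with five colours), so the same list could be produced with a comprehension; this is only an illustrative sketch, not how Django generated the file.

# Hypothetical reconstruction of the operations list above via a comprehension.
from django.db import migrations, models

operations = [
    migrations.AlterField(
        model_name='leaguebadgeroundentry',
        name='{0}_{1}_score'.format(prefix, colour),
        field=models.IntegerField(verbose_name='{0} {1}'.format(label, colour.capitalize())),
    )
    for prefix, label in (('bb', 'BB'), ('cb', 'CP'), ('lb', 'LB'), ('rc', 'RC'))
    for colour in ('black', 'blue', 'gold', 'red', 'white')
]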
| 34.894737
| 63
| 0.587733
| 349
| 3,978
| 6.461318
| 0.120344
| 0.177384
| 0.221729
| 0.257206
| 0.932594
| 0.932594
| 0.932594
| 0
| 0
| 0
| 0
| 0.011216
| 0.305178
| 3,978
| 113
| 64
| 35.20354
| 0.804631
| 0.011312
| 0
| 0.560748
| 1
| 0
| 0.217502
| 0.112694
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009346
| 0
| 0.037383
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 6 |
390607443fe47de4159aa9c452011b3665fffa1f | 36 | py | Python | kmmi/exposure/__init__.py | Decitizen/kMMI | 921ef6e45fbec484251444886e246741d7f0120a | ["MIT"] | null | null | null | kmmi/exposure/__init__.py | Decitizen/kMMI | 921ef6e45fbec484251444886e246741d7f0120a | ["MIT"] | null | null | null | kmmi/exposure/__init__.py | Decitizen/kMMI | 921ef6e45fbec484251444886e246741d7f0120a | ["MIT"] | null | null | null |
from kmmi.exposure.exposure import *
| 36
| 36
| 0.833333
| 5
| 36
| 6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0 | 6 |
390eef8ac9197704b04a6094a11431c7d2503cdc | 1,937 | py | Python | Day6/6.py | thatguyandy27/AdventOfCode2021 | 90c4c27a7a9ec91844c8bf7d17d62586d3ec1913 | ["Apache-2.0"] | null | null | null | Day6/6.py | thatguyandy27/AdventOfCode2021 | 90c4c27a7a9ec91844c8bf7d17d62586d3ec1913 | ["Apache-2.0"] | null | null | null | Day6/6.py | thatguyandy27/AdventOfCode2021 | 90c4c27a7a9ec91844c8bf7d17d62586d3ec1913 | ["Apache-2.0"] | null | null | null |
input = [1, 1, 1, 1, 1, 1, 1, 4, 1, 2, 1, 1, 4, 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 3, 1, 1, 2, 1, 2, 1, 3, 3, 4, 1, 4, 1, 1, 3, 1, 1, 5, 1, 1, 1, 1, 4, 1, 1, 5, 1, 1, 1, 4, 1, 5, 1, 1, 1, 3, 1, 1, 5, 3, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 2, 4, 1, 1, 1, 1, 4, 1, 2, 2, 1, 1, 1, 3, 1, 2, 5, 1, 4, 1, 1, 1, 3, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 5, 1, 1, 1, 4, 1, 1, 5, 1, 1, 5, 3, 3, 5, 3, 1, 1,
1, 4, 1, 1, 1, 1, 1, 1, 5, 3, 1, 2, 1, 1, 1, 4, 1, 3, 1, 5, 1, 1, 2, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 4, 3, 2, 1, 2, 4, 1, 3, 1, 5, 1, 2, 1, 4, 1, 1, 1, 1, 1, 3, 1, 4, 1, 1, 1, 1, 3, 1, 3, 3, 1, 4, 3, 4, 1, 1, 1, 1, 5, 1, 3, 3, 2, 5, 3, 1, 1, 3, 1, 3, 1, 1, 1, 1, 4, 1, 1, 1, 1, 3, 1, 5, 1, 1, 1, 4, 4, 1, 1, 5, 5, 2, 4, 5, 1, 1, 1, 1, 5, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 3, 1, 1, 2, 1, 1]
new_num = 8
reset_num = 6


def getFishCounts(input):
    fishes = [0] * (new_num + 1)
    for i in input:
        fishes[i] += 1
    return fishes


def simDay(fishes):
    newFishes = [0] * (new_num + 1)
    # Move counters down
    for i in range(0, new_num):
        newFishes[i] = fishes[i + 1]
    # Move the zeros back to 7
    newFishes[reset_num] += fishes[0]
    # Create new fishes
    newFishes[8] = fishes[0]
    return newFishes


def runSim(input, days):
    fishes = getFishCounts(input)
    for d in range(days):
        fishes = simDay(fishes)
        # print(f'Day: {d}: ', fishes)
    return sum(fishes)


if __name__ == '__main__':
    # test = runSim([3, 4, 3, 1, 2], 80)
    # print(test)
    isPart1 = False
    if isPart1:
        total = runSim(input, 80)
        print('The answer is:', total)
    else:
        total = runSim(input, 256)
        print('The answer is:', total)
    # else:
    #     total = findWorstVents(filename, False)
    #     print('The answer is:', total)
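
A quick sanity check, assuming the definitions above are in scope: the puzzle's worked sample [3, 4, 3, 1, 2] grows to 26 fish after 18 days and to 5934 after 80.

# Sanity check against the sample values from the puzzle statement.
assert runSim([3, 4, 3, 1, 2], 18) == 26
assert runSim([3, 4, 3, 1, 2], 80) == 5934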
| 33.396552
| 461
| 0.453278
| 436
| 1,937
| 1.981651
| 0.119266
| 0.3125
| 0.302083
| 0.259259
| 0.40162
| 0.344907
| 0.300926
| 0.203704
| 0.186343
| 0.108796
| 0
| 0.249238
| 0.322664
| 1,937
| 57
| 462
| 33.982456
| 0.409299
| 0.11461
| 0
| 0.068966
| 0
| 0
| 0.021114
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0
| 0
| 0.206897
| 0.068966
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 6 |
391f566a4819260dcb8114df4800170f63917127 | 128 | py | Python | grpc/clients/python/vegaapiclient/generated/wallet/v1/__init__.py | legg/api | a818834f8a935b802af3b01b4237e64ed41ab3f2 | ["MIT"] | 6 | 2021-05-20T15:30:46.000Z | 2022-02-22T12:06:39.000Z | grpc/clients/python/vegaapiclient/generated/wallet/v1/__init__.py | legg/api | a818834f8a935b802af3b01b4237e64ed41ab3f2 | ["MIT"] | 29 | 2021-03-16T11:58:05.000Z | 2021-10-05T14:04:45.000Z | vegaapiclient/generated/vega/wallet/v1/__init__.py | vegaprotocol/sdk-python | 2491f62704afd806a47cb8467a7edf0dd65bbf1b | ["MIT"] | 6 | 2021-05-07T06:43:02.000Z | 2022-03-29T07:18:01.000Z |
from . import wallet_pb2_grpc as wallet_grpc
from . import wallet_pb2 as wallet
__all__ = [
    "wallet_grpc",
    "wallet",
]
| 16
| 44
| 0.703125
| 18
| 128
| 4.5
| 0.388889
| 0.246914
| 0.395062
| 0.469136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 0.210938
| 128
| 7
| 45
| 18.285714
| 0.782178
| 0
| 0
| 0
| 0
| 0
| 0.132813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0 | 6 |
1a31249dd4025a966d8f9e01d3235e3a9810453b | 566 | py | Python | venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/densenet/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | ["MIT"] | 1 | 2021-05-24T10:08:51.000Z | 2021-05-24T10:08:51.000Z | venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/densenet/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | ["MIT"] | null | null | null | venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/densenet/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | ["MIT"] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.keras.applications.densenet namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from keras.applications.densenet import DenseNet121
from keras.applications.densenet import DenseNet169
from keras.applications.densenet import DenseNet201
from keras.applications.densenet import decode_predictions
from keras.applications.densenet import preprocess_input
del _print_function
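
A minimal usage sketch for the symbols this shim re-exports; weights=None is chosen here only so the sketch runs without downloading a checkpoint, and the import path assumes the same keras package as above.

# Hypothetical usage of the re-exported DenseNet helpers.
import numpy as np
from keras.applications.densenet import DenseNet121, preprocess_input

model = DenseNet121(weights=None)  # random weights, no download
batch = preprocess_input(np.zeros((1, 224, 224, 3), dtype=np.float32))
print(model.predict(batch).shape)  # (1, 1000) class scores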
| 33.294118
| 82
| 0.844523
| 75
| 566
| 6.186667
| 0.52
| 0.219828
| 0.323276
| 0.3125
| 0.377155
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017647
| 0.09894
| 566
| 16
| 83
| 35.375
| 0.892157
| 0.323322
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.875
| 0
| 0.875
| 0.25
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0 | 6 |