Columns:

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

Quality-signal columns. Each `*_quality_signal` value is paired with a same-named hit-flag column without the suffix:

| quality signal | signal dtype | hit-flag dtype |
|---|---|---|
| qsc_code_num_words_quality_signal | int64 | int64 |
| qsc_code_num_chars_quality_signal | float64 | int64 |
| qsc_code_mean_word_length_quality_signal | float64 | int64 |
| qsc_code_frac_words_unique_quality_signal | float64 | null |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 | int64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 | int64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 | int64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 | int64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 | int64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 | int64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 | int64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 | int64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 | int64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 | int64 |
| qsc_code_frac_chars_digital_quality_signal | float64 | int64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 | int64 |
| qsc_code_size_file_byte_quality_signal | float64 | int64 |
| qsc_code_num_lines_quality_signal | float64 | int64 |
| qsc_code_num_chars_line_max_quality_signal | float64 | int64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 | int64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 | int64 |
| qsc_code_frac_chars_comments_quality_signal | float64 | int64 |
| qsc_code_cate_xml_start_quality_signal | float64 | int64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 | int64 |
| qsc_code_cate_autogen_quality_signal | float64 | int64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 | int64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 | int64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 | int64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 | null |
| qsc_code_cate_encoded_data_quality_signal | float64 | int64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 | int64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 | int64 |
| qsc_code_frac_lines_assert_quality_signal | float64 | int64 |
| qsc_codepython_cate_ast_quality_signal | float64 | int64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 | int64 |
| qsc_codepython_cate_var_zero_quality_signal | bool | int64 |
| qsc_codepython_frac_lines_pass_quality_signal | float64 | int64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 | int64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 | int64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 | int64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 | int64 |

The row summaries below list the quality signals in this order, with the `qsc_code` / `qsc_codepython` prefixes and the `_quality_signal` suffix dropped. Hit-flag lines name only the flags set to 1; `frac_words_unique` and `string_concat` are null in every row and all remaining flags are 0.
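The surface-level statistics (line lengths, character-class fractions) are straightforward to recompute from `content`. Below is a minimal sketch, assuming UTF-8 text and `\n` line breaks; the dump's exact newline and tokenization conventions are unknown, so expect small off-by-one differences (Row 2 reports `num_lines` 1 but `size` 22, which suggests a trailing newline):

```python
# Sketch only: plausible re-implementations of a few surface signals.
# The dataset's actual extraction pipeline is not shown in this dump.
def surface_signals(content: str) -> dict:
    lines = content.split("\n")
    n = len(content)
    return {
        "size_file_byte": len(content.encode("utf-8")),
        "num_lines": len(lines),
        "num_chars_line_max": max(len(line) for line in lines),
        "num_chars_line_mean": n / len(lines),
        "alphanum_fraction": sum(c.isalnum() for c in content) / n,
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n,
    }

# Row 2's content plus a trailing newline reproduces its alphanum_fraction
# (16/22 = 0.727273) and frac_chars_whitespace (4/22 = 0.181818); the
# line-count convention evidently differs from this naive split.
print(surface_signals("from .pyBASS import *\n"))
```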
**Row 1**

| field | value |
|---|---|
| hexsha | a1c856b268aeebc7840d2380fb74682b65d2fd67 |
| size / ext / lang | 56 / py / Python |
| max_stars | anthill/tools/services/login/__init__.py · 0x55AAh/anthill_gaming @ 475af798bd08d85fc0fbfce9d2ba710f73252c15 · ["MIT"] · count 1 · 2018-11-30T21:56:14.000Z → 2018-11-30T21:56:14.000Z |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 9.333333 / 22 / 0.678571 |

content:

```python
from .. import Service
class Login(Service):
    pass
```

Quality signals: num_words 7, num_chars 56, mean_word_length 5.428571, frac_words_unique 0.857143, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.232143, size_file_byte 56, num_lines 5, line_max 23, line_mean 11.2, alphabet 0.883721, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0.333333, import 0.333333, simplefunc 0, no_logic 0.666667, print 0.

Hit flags (1): num_words, num_lines, var_zero, pass, import, no_logic. effective 0, hits 6.
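Across every complete row in this dump, `hits` equals the number of 1-valued hit-flag columns and `effective` is "0". A small self-check against Row 1's transcribed flags (the column names are the dataset's; the dict itself is just an illustration):

```python
# Row 1's hit flags, transcribed from the row summary above.
row1_hit_flags = {
    "qsc_code_num_words": 1,
    "qsc_code_num_lines": 1,
    "qsc_codepython_cate_var_zero": 1,
    "qsc_codepython_frac_lines_pass": 1,
    "qsc_codepython_frac_lines_import": 1,
    "qsc_codepython_score_lines_no_logic": 1,
    # every other hit flag in Row 1 is 0; qsc_code_frac_words_unique and
    # qsc_code_frac_lines_string_concat are null in every row
}
assert sum(row1_hit_flags.values()) == 6  # matches Row 1's hits = 6
```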
**Row 2**

| field | value |
|---|---|
| hexsha | a1eb1222a9f1fc4889daec4ed5f0c967dba529c8 |
| size / ext / lang | 22 / py / Python |
| max_stars | pyBASS/__init__.py · lanl/pyBASS @ 968077b53ca4d9a39884b6d3ea52b6a57d8576fa · ["BSD-3-Clause"] · count 8 · 2021-09-06T08:47:12.000Z → 2022-03-21T19:44:12.000Z |
| max_issues | same path / repo / head / licenses · count 2 · 2021-12-13T18:55:40.000Z → 2021-12-21T18:14:26.000Z |
| max_forks | same path / repo / head / licenses · count 2 · 2021-05-05T22:28:24.000Z → 2021-12-16T00:23:43.000Z |
| avg_line_length / max_line_length / alphanum_fraction | 11 / 21 / 0.727273 |

content:

```python
from .pyBASS import *
```

Quality signals: num_words 3, num_chars 22, mean_word_length 5.333333, frac_words_unique 1, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.181818, size_file_byte 22, num_lines 1, line_max 22, line_mean 22, alphabet 0.888889, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0, import 1, simplefunc 0, no_logic 1, print 0.

Hit flags (1): num_words, num_chars, num_lines, var_zero, import, no_logic. effective 0, hits 6.
**Row 3**

| field | value |
|---|---|
| hexsha | 62bf1dfe40fa9f5e0b3d5f7def61407170bcc958 |
| size / ext / lang | 21 / py / Python |
| max_stars | nodebox/sound/__init__.py · mcjocobe/drawExploration @ 2c50526ef14dea5bc3802b7fda08871919d62ac4 · ["BSD-3-Clause"] · count 76 · 2015-01-21T11:21:08.000Z → 2022-02-04T13:33:19.000Z |
| max_issues | same path / repo / head / licenses · count 8 · 2015-11-12T07:42:58.000Z → 2020-06-09T10:01:15.000Z |
| max_forks | same path / repo / head / licenses · count 23 · 2015-01-12T12:07:40.000Z → 2020-04-13T16:32:15.000Z |
| avg_line_length / max_line_length / alphanum_fraction | 21 / 21 / 0.809524 |

content:

```python
from process import *
```

Quality signals: num_words 3, num_chars 21, mean_word_length 5.666667, frac_words_unique 1, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.142857, size_file_byte 21, num_lines 1, line_max 21, line_mean 21, alphabet 0.944444, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0, import 1, simplefunc 0, no_logic 1, print 0.

Hit flags (1): num_words, num_chars, num_lines, var_zero, import, no_logic. effective 0, hits 6.
**Row 4**

| field | value |
|---|---|
| hexsha | 62e76a49aec3490b3224380b500745ab42d7710d |
| size / ext / lang | 3,355 / py / Python |
| max_stars | test.py · imapex/CHROnIC_Bus @ 3d5ce51f9ee84aad5d0aeba3759595c4f62d5cb4 · ["MIT"] · count null |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count 2 · 2016-11-04T16:43:40.000Z → 2017-02-15T16:10:05.000Z |
| avg_line_length / max_line_length / alphanum_fraction | 36.467391 / 78 / 0.628614 |

content:

```python
#!/usr/bin/python
import app
import unittest
ct = 'application/json'
class FlaskTestCase(unittest.TestCase):
    def setUp(self):
        app.app.config['TESTING'] = True
        self.app = app.app.test_client()
    def test_top_level_http_response(self):
        resp = self.app.get('/')
        self.assertEqual(resp.status_code, 200)
    def test_get_no_results(self):
        resp = self.app.get('/api/get/testplan123', content_type=ct)
        self.assertEqual(resp.status_code, 404)
    def test_getstatus_no_results(self):
        resp = self.app.get('/api/status/1', content_type=ct)
        self.assertEqual(resp.status_code, 404)
    def test_poststatus_no_results(self):
        d = '{"status": "2"}'
        resp = self.app.post('/api/status/1', data=d, content_type=ct)
        self.assertEqual(resp.status_code, 404)
    def test_delete_no_results(self):
        resp = self.app.delete('/api/send/testplan123')
        self.assertEqual(resp.status_code, 404)
    def test_delete_results(self):
        resp = self.app.delete('/api/send/testplan123')
        d = '{"msgdata": "data1"}'
        resp = self.app.post('/api/send/testplan123', data=d, content_type=ct)
        resp = self.app.delete('/api/send/testplan123')
        self.assertEqual(resp.status_code, 204)
    def test_post_result_one(self):
        resp = self.app.delete('/api/send/testplan123')
        d = '{"msgdata": "data1"}'
        resp = self.app.post('/api/send/testplan123', data=d, content_type=ct)
        self.app.delete('/api/send/testplan123')
        self.assertEqual(resp.data, b'1')
    def test_post_result_two(self):
        resp = self.app.delete('/api/send/testplan123')
        d = '{"msgdata": "data1"}'
        resp = self.app.post('/api/send/testplan123', data=d, content_type=ct)
        d = '{"msgdata": "data2"}'
        resp = self.app.post('/api/send/testplan123', data=d, content_type=ct)
        self.app.delete('/api/send/testplan123')
        self.assertEqual(resp.data, b'2')
    def test_get_results(self):
        resp = self.app.delete('/api/send/testplan123')
        d = '{"msgdata": "data1"}'
        resp = self.app.post('/api/send/testplan123', data=d, content_type=ct)
        resp = self.app.get('/api/get/testplan123', content_type=ct)
        self.app.delete('/api/send/testplan123')
        self.assertEqual(resp.status_code, 200)
    def test_getstatus_results(self):
        resp = self.app.delete('/api/send/testplan123')
        d = '{"msgdata": "data1"}'
        resp = self.app.post('/api/send/testplan123', data=d, content_type=ct)
        resp = self.app.get('/api/status/1', content_type=ct)
        self.app.delete('/api/send/testplan123')
        self.assertEqual(resp.status_code, 200)
    def test_poststatus_results(self):
        resp = self.app.delete('/api/send/testplan123')
        d = '{"msgdata": "data1"}'
        resp = self.app.post('/api/send/testplan123', data=d, content_type=ct)
        d = '{"status": "2"}'
        resp = self.app.post('/api/status/1', data=d, content_type=ct)
        self.app.delete('/api/send/testplan123')
        self.assertEqual(resp.status_code, 200)
    def test_bad_path(self):
        resp = self.app.get('/api/badcall')
        self.assertEqual(resp.status_code, 404)
    def tearDown(self):
        pass
if __name__ == '__main__':
    unittest.main()
```

Quality signals: num_words 448, num_chars 3,355, mean_word_length 4.564732, frac_words_unique 0.138393, top_2grams 0.102689, top_3grams 0.123716, top_4grams 0.101711, dupe_5grams 0.831785, dupe_6grams 0.822983, dupe_7grams 0.811736, dupe_8grams 0.794621, dupe_9grams 0.762836, dupe_10grams 0.746699, replacement_symbols 0, digital 0.041808, whitespace 0.208644, size_file_byte 3,355, num_lines 91, line_max 79, line_mean 36.868132, alphabet 0.728437, comments 0.004769, xml_start 0, dupe_lines 0.597222, autogen 0, long_string 0, string_length 0.218095, long_word_length 0.125824, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0.166667; python: cate_ast 1, func_ratio 0.194444, var_zero false, pass 0.013889, import 0.027778, simplefunc 0, no_logic 0.236111, print 0.

Hit flags (1): dupe_5grams, dupe_6grams, dupe_7grams, dupe_8grams, dupe_9grams, dupe_10grams. effective 0, hits 6.
**Row 5**

| field | value |
|---|---|
| hexsha | 1a01172863bd096733f3e1119e0ba8749cf6e1d1 |
| size / ext / lang | 96 / py / Python |
| max_stars | terrascript/nomad/__init__.py · vutsalsinghal/python-terrascript @ 3b9fb5ad77453d330fb0cd03524154a342c5d5dc · ["BSD-2-Clause"] · count null |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 16 / 34 / 0.791667 |

content:

```python
# terrascript/nomad/__init__.py
import terrascript
class nomad(terrascript.Provider):
    pass
```

Quality signals: num_words 11, num_chars 96, mean_word_length 6.545455, frac_words_unique 0.727273, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.125, size_file_byte 96, num_lines 6, line_max 35, line_mean 16, alphabet 0.857143, comments 0.302083, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0.333333, import 0.333333, simplefunc 0, no_logic 0.666667, print 0.

Hit flags (1): num_words, num_lines, var_zero, pass, import, no_logic. effective 0, hits 6.
**Row 6**

| field | value |
|---|---|
| hexsha | 1a07dce1706cb6d3fdf95f03a4c1e171f0054831 |
| size / ext / lang | 47 / py / Python |
| max_stars | mountaintools/cairio/__init__.py · tjd2002/spikeforest2 @ 2e393564b858b2995aa2ccccd9bd73065681b5de · ["Apache-2.0"] · count null |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 23.5 / 46 / 0.851064 |

content:

```python
from .cairioclient import CairioClient, client
```

Quality signals: num_words 5, num_chars 47, mean_word_length 8, frac_words_unique 0.8, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.106383, size_file_byte 47, num_lines 1, line_max 47, line_mean 47, alphabet 0.952381, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0, import 1, simplefunc 0, no_logic 1, print 0.

Hit flags (1): num_words, num_chars, num_lines, var_zero, import, no_logic. effective 0, hits 6.
**Row 7**

| field | value |
|---|---|
| hexsha | 1a13e8fd55e243ec52ce954ab31689f8a93fbcd5 |
| size / ext / lang | 4,925 / py / Python |
| max_stars | neukrill_net/model_combination.py · Neuroglycerin/neukrill-net-tools @ 133c68403128e6fcea6d6c8c8326b45443ef7f4e · ["MIT"] · count null |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 43.201754 / 87 / 0.665787 |

content:

```python
"""
Creates a theano based gradient descent optimiser for finding good choices of
weights to combine model predictions.
"""
import theano as th
import theano.tensor as tt
import numpy as np
def compile_model_combination_weight_optimiser(lr_adjuster = lambda h, t: h):
    model_weights = tt.vector('w') # indexed over K models
    model_preds = tt.tensor3('P') # indexed over N examples, M classes, K models
    true_labels = tt.matrix('Y') # indexed over N examples, M classes
    learning_rate = tt.scalar('h')
    n_steps = tt.iscalar('n_steps')
    # use softmax form to ensure weights all >=0 and sum to one
    comb_preds = (tt.sum(tt.exp(model_weights) * model_preds, axis=2) /
                  tt.sum(tt.exp(model_weights), axis=0))
    # mean negative log loss cost function
    cost = - tt.mean(tt.sum(tt.log(comb_preds) * true_labels, axis=1))
    # gradient of log loss cost with respect to weights
    dC_dW = lambda W: th.clone(th.gradient.jacobian(cost, model_weights),
                               {model_weights: W})
    # scan through gradient descent updates of weights, applying learning rate
    # adjuster at each step
    [Ws, hs], updates = th.scan(
        fn = lambda t, W, h: [W - h * dC_dW(W), lr_adjuster(h, t)],
        outputs_info = [model_weights, learning_rate],
        sequences = [th.tensor.arange(n_steps)],
        n_steps = n_steps,
        name = 'weight cost gradient descent')
    # create a function to get last updated weight from scan sequence
    weights_optimiser = th.function(
        inputs = [model_weights, model_preds, true_labels, learning_rate,
                  n_steps],
        outputs = Ws[-1],
        updates = updates,
        allow_input_downcast = True,
    )
    # also compile a function for evaluating cost function to check optimiser
    # performance / convergence
    cost_func = th.function([model_weights, model_preds, true_labels], cost)
    return weights_optimiser, cost_func
def compile_per_class_model_combination_weight_optimiser(lr_adjuster = lambda h, t: h):
    model_weights = tt.matrix('w') # indexed over M classes, K models
    model_preds = tt.tensor3('P') # indexed over N examples, M classes, K models
    true_labels = tt.matrix('Y') # indexed over N examples, M classes
    learning_rate = tt.scalar('h')
    n_steps = tt.iscalar('n_steps')
    # use softmax form to ensure weights all >=0 and sum to one
    comb_preds = (tt.sum(tt.exp(model_weights) * model_preds, axis=2) /
                  tt.sum(tt.exp(model_weights), axis=1))
    # mean negative log loss cost function
    cost = - tt.mean(tt.sum(tt.log(comb_preds) * true_labels, axis=1))
    # gradient of log loss cost with respect to weights
    dC_dW = lambda W: th.clone(th.gradient.jacobian(cost, model_weights),
                               {model_weights: W})
    # scan through gradient descent updates of weights, applying learning rate
    # adjuster at each step
    [Ws, hs], updates = th.scan(
        fn = lambda t, W, h: [W - h * dC_dW(W), lr_adjuster(h, t)],
        outputs_info = [model_weights, learning_rate],
        sequences = [th.tensor.arange(n_steps)],
        n_steps = n_steps,
        name = 'weight cost gradient descent')
    # create a function to get last updated weight from scan sequence
    weights_optimiser = th.function(
        inputs = [model_weights, model_preds, true_labels, learning_rate,
                  n_steps],
        outputs = Ws[-1],
        updates = updates,
        allow_input_downcast = True,
    )
    # also compile a function for evaluating cost function to check optimiser
    # performance / convergence
    cost_func = th.function([model_weights, model_preds, true_labels], cost)
    return weights_optimiser, cost_func
if __name__ == '__main__':
    """
    Test with randomly generated model predictions and labels.
    """
    N_MODELS = 3
    N_CLASSES = 10
    N_DATA = 100
    SEED = 1234
    INIT_LEARNING_RATE = 0.1
    LR_ADJUSTER = lambda h, t: h
    N_STEP = 1000
    prng = np.random.RandomState(SEED)
    weights = np.zeros((N_CLASSES, N_MODELS))
    model_pred_vals = prng.rand(N_DATA, N_CLASSES, N_MODELS)
    model_pred_vals = model_pred_vals / model_pred_vals.sum(1)[:,None,:]
    true_label_vals = prng.rand(N_DATA, N_CLASSES)
    true_label_vals = true_label_vals == true_label_vals.max(axis=1)[:,None]
    optimiser, cost = compile_per_class_model_combination_weight_optimiser(LR_ADJUSTER)
    print('Initial weights {0}'.format(weights))
    print('Initial cost value {0}'.format(
        cost(weights, model_pred_vals, true_label_vals)))
    updated_weights = optimiser(weights, model_pred_vals, true_label_vals,
                                INIT_LEARNING_RATE, N_STEP)
    print('Final weights {0}'.format(updated_weights))
    print('Final cost value {0}'.format(
        cost(updated_weights, model_pred_vals, true_label_vals)))
```

Quality signals: num_words 693, num_chars 4,925, mean_word_length 4.516595, frac_words_unique 0.206349, top_2grams 0.061342, top_3grams 0.04345, top_4grams 0.042173, dupe_5grams 0.821725, dupe_6grams 0.808946, dupe_7grams 0.782109, dupe_8grams 0.720447, dupe_9grams 0.720447, dupe_10grams 0.697764, replacement_symbols 0, digital 0.009043, whitespace 0.236548, size_file_byte 4,925, num_lines 113, line_max 88, line_mean 43.584071, alphabet 0.823404, comments 0.230457, xml_start 0, dupe_lines 0.564103, autogen 0, long_string 0, string_length 0.044493, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0.025641, var_zero false, pass 0, import 0.038462, simplefunc 0, no_logic 0.089744, print 0.051282.

Hit flags (1): dupe_5grams, dupe_6grams, dupe_7grams, dupe_8grams, dupe_9grams, dupe_10grams. effective 0, hits 6.
**Row 8**

| field | value |
|---|---|
| hexsha | a7fc3dfdb7f781593a5c59dce1595b7f8099347d |
| size / ext / lang | 6,201 / py / Python |
| max_stars | basic_code/load.py · Soapy-Salted-Fish-King/Emotion-FAN @ 5a48f9290f48d713397761db9d72efa9220b5ef3 · ["MIT"] · count 275 · 2019-09-11T10:22:06.000Z → 2022-03-29T07:14:31.000Z |
| max_issues | basic_code/load.py · DebinMeng19-OpenSourceLibrary/Emotion-FAN @ 874e871999a2002cd5dd9dffff2c4400c2e1805b · ["MIT"] · count 34 · 2019-09-11T11:32:32.000Z → 2022-03-18T09:32:42.000Z |
| max_forks | same path / repo / head / licenses as max_issues · count 69 · 2019-09-18T19:00:17.000Z → 2022-03-08T11:43:49.000Z |
| avg_line_length / max_line_length / alphanum_fraction | 45.262774 / 153 / 0.587325 |

content:

```python
from __future__ import print_function
import torch
print(torch.__version__)
import torch.utils.data
import torchvision.transforms as transforms
from basic_code import data_generator
cate2label = {'CK+':{0: 'Happy', 1: 'Angry', 2: 'Disgust', 3: 'Fear', 4: 'Sad', 5: 'Contempt', 6: 'Surprise',
                     'Angry': 1,'Disgust': 2,'Fear': 3,'Happy': 0,'Contempt': 5,'Sad': 4,'Surprise': 6},
              'AFEW':{0: 'Happy',1: 'Angry',2: 'Disgust',3: 'Fear',4: 'Sad',5: 'Neutral',6: 'Surprise',
                      'Angry': 1,'Disgust': 2,'Fear': 3,'Happy': 0,'Neutral': 5,'Sad': 4,'Surprise': 6}}
def ckplus_faces_baseline(video_root, video_list, fold, batchsize_train, batchsize_eval):
    train_dataset = data_generator.TenFold_VideoDataset(
        video_root=video_root,
        video_list=video_list,
        rectify_label=cate2label['CK+'],
        transform=transforms.Compose([transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()]),
        fold=fold,
        run_type='train'
    )
    val_dataset = data_generator.TenFold_VideoDataset(
        video_root=video_root,
        video_list=video_list,
        rectify_label=cate2label['CK+'],
        transform=transforms.Compose([transforms.Resize(224), transforms.ToTensor()]),
        fold=fold,
        run_type='test'
    )
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batchsize_train, shuffle=True, num_workers=8,pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=batchsize_eval, shuffle=False, num_workers=8, pin_memory=True)
    return train_loader, val_loader
def ckplus_faces_fan(video_root, video_list, fold, batchsize_train, batchsize_eval):
    train_dataset = data_generator.TenFold_TripleImageDataset(
        video_root=video_root,
        video_list=video_list,
        rectify_label=cate2label['CK+'],
        transform=transforms.Compose([
            transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()]),
        fold=fold,
        run_type='train',
    )
    val_dataset = data_generator.TenFold_VideoDataset(
        video_root=video_root,
        video_list=video_list,
        rectify_label=cate2label['CK+'],
        transform=transforms.Compose([transforms.Resize(224), transforms.ToTensor()]),
        fold=fold,
        run_type='test'
    )
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batchsize_train, shuffle=True, num_workers=8,pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=batchsize_eval, shuffle=False, num_workers=8, pin_memory=True)
    return train_loader, val_loader
def afew_faces_baseline(root_train, list_train, batchsize_train, root_eval, list_eval, batchsize_eval):
    train_dataset = data_generator.VideoDataset(
        video_root=root_train,
        video_list=list_train,
        rectify_label=cate2label['AFEW'],
        transform=transforms.Compose([transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()]),
    )
    val_dataset = data_generator.VideoDataset(
        video_root=root_eval,
        video_list=list_eval,
        rectify_label=cate2label['AFEW'],
        transform=transforms.Compose([transforms.Resize(224), transforms.ToTensor()]),
        csv=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batchsize_train, shuffle=True,
        num_workers=8, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=batchsize_eval, shuffle=False,
        num_workers=8, pin_memory=True)
    return train_loader, val_loader
def afew_faces_fan(root_train, list_train, batchsize_train, root_eval, list_eval, batchsize_eval):
    train_dataset = data_generator.TripleImageDataset(
        video_root=root_train,
        video_list=list_train,
        rectify_label=cate2label['AFEW'],
        transform=transforms.Compose([transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()]),
    )
    val_dataset = data_generator.VideoDataset(
        video_root=root_eval,
        video_list=list_eval,
        rectify_label=cate2label['AFEW'],
        transform=transforms.Compose([transforms.Resize(224), transforms.ToTensor()]),
        csv=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batchsize_train, shuffle=True,
        num_workers=8, pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=batchsize_eval, shuffle=False,
        num_workers=8, pin_memory=True)
    return train_loader, val_loader
def model_parameters(_structure, _parameterDir):
    checkpoint = torch.load(_parameterDir)
    pretrained_state_dict = checkpoint['state_dict']
    model_state_dict = _structure.state_dict()
    for key in pretrained_state_dict:
        if ((key == 'module.fc.weight') | (key == 'module.fc.bias')):
            pass
        else:
            model_state_dict[key.replace('module.', '')] = pretrained_state_dict[key]
    _structure.load_state_dict(model_state_dict)
    model = torch.nn.DataParallel(_structure).cuda()
    return model
```

Quality signals: num_words 624, num_chars 6,201, mean_word_length 5.543269, frac_words_unique 0.160256, top_2grams 0.036427, top_3grams 0.040474, top_4grams 0.083261, dupe_5grams 0.832032, dupe_6grams 0.810639, dupe_7grams 0.810639, dupe_8grams 0.80717, dupe_9grams 0.80717, dupe_10grams 0.80717, replacement_symbols 0, digital 0.016228, whitespace 0.314304, size_file_byte 6,201, num_lines 136, line_max 154, line_mean 45.595588, alphabet 0.797272, comments 0, xml_start 0, dupe_lines 0.630631, autogen 0, long_string 0, string_length 0.041606, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0.045045, var_zero false, pass 0.009009, import 0.045045, simplefunc 0, no_logic 0.135135, print 0.018018.

Hit flags (1): dupe_5grams, dupe_6grams, dupe_7grams, dupe_8grams, dupe_9grams, dupe_10grams. effective 0, hits 6.
**Row 9**

| field | value |
|---|---|
| hexsha | c505ec7a70aebc306a64376c725499551ec1f125 |
| size / ext / lang | 181 / py / Python |
| max_stars | lab5_moduly/5.1.py · Damian9449/Python @ dc9091e15356733821bbb6a768b7d5e428640340 · ["MIT"] · count 1 · 2017-11-15T13:03:40.000Z → 2017-11-15T13:03:40.000Z |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 18.1 / 39 / 0.80663 |

content:

```python
#!/usr/bin/python
import rekurencja
import rekurencja as rek
from rekurencja import factorial
from rekurencja import fibonacci as fib
print(rekurencja.factorial(6))
print(fib(5))
```

Quality signals: num_words 26, num_chars 181, mean_word_length 5.615385, frac_words_unique 0.538462, top_2grams 0.328767, top_3grams 0.273973, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0.0125, whitespace 0.116022, size_file_byte 181, num_lines 9, line_max 40, line_mean 20.111111, alphabet 0.9, comments 0.088398, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0, import 0.666667, simplefunc 0, no_logic 0.666667, print 0.333333.

Hit flags (1): num_words, top_2grams, top_3grams, num_lines, var_zero, import. effective 0, hits 6.
**Row 10**

| field | value |
|---|---|
| hexsha | c52f2cb5665c9225cc331c7afdccbd81e9901d35 |
| size / ext / lang | 35 / py / Python |
| max_stars | Package/V0.0.0-beta/Scripts/monte_carlo.py · Password-Classified/Stock-Search @ 517787e29de9e531e2b01ba94ee3d8a2a8928dca · ["Unlicense"] · count 3 · 2021-11-07T20:16:54.000Z → 2022-01-24T07:47:52.000Z |
| max_issues | Source/Scripts/monte_carlo.py · same repo / head / licenses · count null |
| max_forks | Source/Scripts/monte_carlo.py · same repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 11.666667 / 33 / 0.828571 |

content:

```python
from Scripts.data import get_data
```

Quality signals: num_words 6, num_chars 35, mean_word_length 4.666667, frac_words_unique 0.833333, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.142857, size_file_byte 35, num_lines 2, line_max 34, line_mean 17.5, alphabet 0.933333, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0, import 1, simplefunc 0, no_logic 1, print 0.

Hit flags (1): num_words, num_chars, num_lines, var_zero, import, no_logic. effective 0, hits 6.
**Row 11**

| field | value |
|---|---|
| hexsha | c540768269f664fa57d294335d673cda2ec78edd |
| size / ext / lang | 98 / py / Python |
| max_stars | tests/test_hello_world.py · sluger/myserverplugin @ c66ca1cddb5ce2f530be8d0ada89a95eebb25df9 · ["BSD-3-Clause"] · count null |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 16.333333 / 38 / 0.816327 |

content:

```python
from myserverplugin import hello_world
def test_hello_world():
    assert hello_world is not None
```

Quality signals: num_words 15, num_chars 98, mean_word_length 5.066667, frac_words_unique 0.733333, top_2grams 0.394737, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.153061, size_file_byte 98, num_lines 5, line_max 39, line_mean 19.6, alphabet 0.915663, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0.333333; python: cate_ast 1, func_ratio 0.333333, var_zero true, pass 0, import 0.333333, simplefunc 0, no_logic 0.666667, print 0.

Hit flags (1): num_words, top_2grams, num_lines, func_ratio, var_zero, import. effective 0, hits 6.
**Row 12**

| field | value |
|---|---|
| hexsha | c565e1279b56c770b906f3704f486f912be143b1 |
| size / ext / lang | 34 / py / Python |
| max_stars | wordle/helpers.py · AntonGomes/wordle @ 2e20aa84f3430d31d291e91421a6f503185d6295 · ["MIT"] · count null |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 11.333333 / 18 / 0.705882 |

content:

```python
def isValid(word):
    return True
```

Quality signals: num_words 5, num_chars 34, mean_word_length 4.8, frac_words_unique 1, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.205882, size_file_byte 34, num_lines 2, line_max 19, line_mean 17, alphabet 0.888889, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0.5, var_zero false, pass 0, import 0, simplefunc 0.5, no_logic 1, print 0.

Hit flags (1): num_words, num_chars, num_lines, func_ratio, simplefunc, no_logic. effective 0, hits 6.
**Row 13**

| field | value |
|---|---|
| hexsha | 3dd979614c8418a0018c01535c3b6e4ddbd28cb5 |
| size / ext / lang | 38 / bzl / Python |
| max_stars | tools/build_defs/apple/fb_apple_test.bzl · CrshOverride/react-native @ 260c5a393fe2708f3d12c722b6d189ec3057743a · ["CC-BY-4.0", "MIT"] · count null |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 12.666667 / 28 / 0.684211 |

content:

```python
def fb_apple_test(**kwargs):
    pass
```

Quality signals: num_words 6, num_chars 38, mean_word_length 4, frac_words_unique 1, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.184211, size_file_byte 38, num_lines 2, line_max 29, line_mean 19, alphabet 0.774194, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0.5, var_zero true, pass 0.5, import 0, simplefunc 0, no_logic 0.5, print 0.

Hit flags (1): num_words, num_chars, num_lines, func_ratio, var_zero, pass. effective 0, hits 6.
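Row 13 is the one non-`py` file in this sample: a Bazel `.bzl` (Starlark) file that still carries `lang = Python`. If the rows were exported to JSON Lines (a hypothetical storage format; `rows.jsonl` and the helper below are illustrative, not part of the dataset), such rows could be isolated like this:

```python
import json

def iter_starlark_rows(path="rows.jsonl"):  # hypothetical export of this table
    """Yield paths of rows tagged lang == "Python" whose extension is not py."""
    with open(path) as f:
        for line in f:
            row = json.loads(line)
            if row["lang"] == "Python" and row["ext"] != "py":
                yield row["max_stars_repo_path"]
```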
**Row 14**

| field | value |
|---|---|
| hexsha | 9a847810c47a6b47e57b717e340b9e3dc96828a0 |
| size / ext / lang | 44 / py / Python |
| max_stars | tests/__init__.py · roansong/osu-replay-parser @ 70a206622b51bb8443d423f6da671bb005cb32f7 · ["MIT"] · count 1 · 2019-12-08T07:22:56.000Z → 2019-12-08T07:22:56.000Z |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 22 / 43 / 0.886364 |

content:

```python
from .replay_test import TestStandardReplay
```

Quality signals: num_words 5, num_chars 44, mean_word_length 7.6, frac_words_unique 1, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.090909, size_file_byte 44, num_lines 1, line_max 44, line_mean 44, alphabet 0.95, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0, import 1, simplefunc 0, no_logic 1, print 0.

Hit flags (1): num_words, num_chars, num_lines, var_zero, import, no_logic. effective 0, hits 6.
**Row 15**

| field | value |
|---|---|
| hexsha | 9a9359e91dbdb03cb73b915309f8efefc068c5f6 |
| size / ext / lang | 2,472 / py / Python |
| max_stars | src/genie/libs/parser/junos/tests/Ping/cli/equal/golden_output_2_expected.py · balmasea/genieparser @ d1e71a96dfb081e0a8591707b9d4872decd5d9d3 · ["Apache-2.0"] · count 204 · 2018-06-27T00:55:27.000Z → 2022-03-06T21:12:18.000Z |
| max_issues | same path / repo / head / licenses · count 468 · 2018-06-19T00:33:18.000Z → 2022-03-31T23:23:35.000Z |
| max_forks | same path / repo / head / licenses · count 309 · 2019-01-16T20:21:07.000Z → 2022-03-30T12:56:41.000Z |
| avg_line_length / max_line_length / alphanum_fraction | 27.164835 / 48 / 0.264159 |

content:

```python
expected_output = {
    "ping": {
        "address": "2001:db8:223c:2c16::2",
        "data-bytes": 56,
        "result": [
            {
                "bytes": 16,
                "from": "2001:db8:223c:2c16::2",
                "hlim": 64,
                "icmp-seq": 0,
                "time": "973.514",
            },
            {
                "bytes": 16,
                "from": "2001:db8:223c:2c16::2",
                "hlim": 64,
                "icmp-seq": 1,
                "time": "0.993",
            },
            {
                "bytes": 16,
                "from": "2001:db8:223c:2c16::2",
                "hlim": 64,
                "icmp-seq": 2,
                "time": "1.170",
            },
            {
                "bytes": 16,
                "from": "2001:db8:223c:2c16::2",
                "hlim": 64,
                "icmp-seq": 3,
                "time": "0.677",
            },
            {
                "bytes": 16,
                "from": "2001:db8:223c:2c16::2",
                "hlim": 64,
                "icmp-seq": 4,
                "time": "0.914",
            },
            {
                "bytes": 16,
                "from": "2001:db8:223c:2c16::2",
                "hlim": 64,
                "icmp-seq": 5,
                "time": "0.814",
            },
            {
                "bytes": 16,
                "from": "2001:db8:223c:2c16::2",
                "hlim": 64,
                "icmp-seq": 6,
                "time": "0.953",
            },
            {
                "bytes": 16,
                "from": "2001:db8:223c:2c16::2",
                "hlim": 64,
                "icmp-seq": 7,
                "time": "1.140",
            },
            {
                "bytes": 16,
                "from": "2001:db8:223c:2c16::2",
                "hlim": 64,
                "icmp-seq": 8,
                "time": "0.800",
            },
            {
                "bytes": 16,
                "from": "2001:db8:223c:2c16::2",
                "hlim": 64,
                "icmp-seq": 9,
                "time": "0.881",
            },
        ],
        "source": "2001:db8:223c:2c16::1",
        "statistics": {
            "loss-rate": 0,
            "received": 10,
            "round-trip": {
                "avg": "98.186",
                "max": "973.514",
                "min": "0.677",
                "stddev": "291.776",
            },
            "send": 10,
        },
    }
}
```

Quality signals: num_words 201, num_chars 2,472, mean_word_length 3.243781, frac_words_unique 0.283582, top_2grams 0.128834, top_3grams 0.202454, top_4grams 0.276074, dupe_5grams 0.638037, dupe_6grams 0.613497, dupe_7grams 0.613497, dupe_8grams 0.613497, dupe_9grams 0.613497, dupe_10grams 0.613497, replacement_symbols 0, digital 0.242647, whitespace 0.559871, size_file_byte 2,472, num_lines 90, line_max 49, line_mean 27.466667, alphabet 0.356618, comments 0, xml_start 0, dupe_lines 0.333333, autogen 0, long_string 0, string_length 0.270227, long_word_length 0.101942, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero false, pass 0, import 0, simplefunc 0, no_logic 0, print 0.

Hit flags (1): top_3grams, top_4grams, dupe_10grams, digital, whitespace, alphabet. effective 0, hits 6.
**Row 16**

| field | value |
|---|---|
| hexsha | b149686145c5f7eb3e955e98e7348ccbcc245399 |
| size / ext / lang | 1,453 / py / Python |
| max_stars | storage_engine/base.py · JackInTaiwan/ViDB @ d658fd4f6a1ad2d7d36bb270fde2a373d3cc965d · ["MIT"] · count 2 · 2021-05-29T06:57:24.000Z → 2021-06-15T09:13:38.000Z |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 21.367647 / 84 / 0.689608 |

content:

```python
import abc
import json
class BaseStorageEngine(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def init_storage(self):
        return NotImplemented
    @abc.abstractmethod
    def create_one(self, image:str, thumbnail:str, features, metadata:json):
        return NotImplemented
    @abc.abstractmethod
    def create_many(self, image:list, thumbnail:list, features:list, metadata:list):
        return NotImplemented
    @abc.abstractmethod
    def read_one(self, index, mode):
        return NotImplemented
    @abc.abstractmethod
    def read_many(self, index:list, mode):
        return NotImplemented
    @abc.abstractmethod
    def delete_one(self, index):
        return NotImplemented
    @abc.abstractmethod
    def delete_many(self, index:list): # TBD: how to relocate files
        return NotImplemented
    @abc.abstractmethod
    def update_one(self, index, metadata):
        return NotImplemented
    @abc.abstractmethod
    def update_many(self, index, metadata):
        return NotImplemented
    @abc.abstractmethod
    def generate_id(self):
        return NotImplemented
    @abc.abstractmethod
    def generate_c_at(self): # create time
        return NotImplemented
    @abc.abstractmethod
    def locate_id(self, index): # TBD: how to relocate files
        return NotImplemented
    @abc.abstractmethod
    def update_storage_table(self, file_path, delete=False):
        return NotImplemented
```

Quality signals: num_words 159, num_chars 1,453, mean_word_length 6.201258, frac_words_unique 0.27044, top_2grams 0.224138, top_3grams 0.263692, top_4grams 0.450304, dupe_5grams 0.647059, dupe_6grams 0.606491, dupe_7grams 0.251521, dupe_8grams 0.251521, dupe_9grams 0.135903, dupe_10grams 0.135903, replacement_symbols 0, digital 0, whitespace 0.23744, size_file_byte 1,453, num_lines 68, line_max 85, line_mean 21.367647, alphabet 0.889892, comments 0.044735, xml_start 0, dupe_lines 0.619048, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0.309524, var_zero false, pass 0, import 0.047619, simplefunc 0.309524, no_logic 0.690476, print 0.

Hit flags (1): top_2grams, top_3grams, top_4grams, func_ratio, simplefunc, no_logic. effective 0, hits 6.
**Row 17**

| field | value |
|---|---|
| hexsha | 492b33a242fdfb68a060e0d0491ea9065ba2fe0a |
| size / ext / lang | 4,858 / py / Python |
| max_stars | livestyled/schemas/fields.py · andrelopez/python-sdk @ 3c83d4698ecf6b5b59003d20cb26644e0dd77f61 · ["MIT"] · count null |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count null |
| avg_line_length / max_line_length / alphanum_fraction | 38.251969 / 113 / 0.53767 |

content:

```python
from marshmallow import class_registry, fields
from marshmallow.base import SchemaABC
class RelatedResourceLinkField(fields.Field):
    def __init__(
        self,
        schema=None,
        many=False,
        microservice_aware=False,
        **kwargs
    ):
        self._schema_arg = schema
        self.many = many
        self.__schema = None
        self.__microservice_aware = microservice_aware
        super(RelatedResourceLinkField, self).__init__(**kwargs)
    @property
    def schema(self):
        if not self.__schema and self._schema_arg:
            if isinstance(self._schema_arg, SchemaABC):
                self.__schema = self._schema_arg
            elif isinstance(self._schema_arg, type) and issubclass(self._schema_arg, SchemaABC):
                self.__schema = self._schema_arg
            elif isinstance(self._schema_arg, str):
                if self._schema_arg == 'self':
                    self.__schema = self.parent.__class__
                else:
                    self.__schema = class_registry.get_class(self._schema_arg)
            else:
                raise ValueError('Nested fields must be passed a Schema, not {0}.'.format(self.nested.__class__))
        return self.__schema
    def _serialize(self, value, attr, obj, **kwargs):
        if value:
            if isinstance(value, (str, int)):
                if self.__microservice_aware:
                    return '/{}/{}'.format(self.schema.Meta.url, value)
                else:
                    return '/v4/{}/{}'.format(self.schema.Meta.api_type, value)
            else:
                if self.__microservice_aware:
                    return '/{}/{}'.format(self.schema.Meta.url, value.id)
                else:
                    return '/v4/{}/{}'.format(self.schema.Meta.api_type, value.id)
        return None
    def _deserialize(self, value, attr, data, **kwargs):
        if self.many:
            return [int(v.split('/')[-1]) for v in value]
        elif isinstance(value, dict):
            return int(value['id'])
        return int(value.split('/')[-1])
class RelatedResourceField(fields.Field):
    def __init__(
        self,
        schema=None,
        many=False,
        microservice_aware=False,
        **kwargs
    ):
        self._schema_arg = schema
        self.many = many
        self.__schema = None
        self.__microservice_aware = microservice_aware
        super(RelatedResourceField, self).__init__(**kwargs)
    @property
    def schema(self):
        if not self.__schema and self._schema_arg:
            if isinstance(self._schema_arg, SchemaABC):
                self.__schema = self._schema_arg
            elif isinstance(self._schema_arg, type) and issubclass(self._schema_arg, SchemaABC):
                self.__schema = self._schema_arg
            elif isinstance(self._schema_arg, str):
                if self._schema_arg == 'self':
                    self.__schema = self.parent.__class__
                else:
                    self.__schema = class_registry.get_class(self._schema_arg)
            else:
                raise ValueError('Nested fields must be passed a Schema, not {0}.'.format(self.nested.__class__))
        return self.__schema
    def _serialize(self, value, attr, obj, **kwargs):
        if value:
            if isinstance(value, (str, int)):
                if self.__microservice_aware:
                    return '/{}/{}'.format(self.schema.Meta.url, value)
                else:
                    return '/v4/{}/{}'.format(self.schema.Meta.url, value)
            elif isinstance(value, list):
                r_value = []
                for v in value:
                    if self.__microservice_aware:
                        r_value.append('/{}/{}'.format(self.schema.Meta.url, v.id))
                    else:
                        r_value.append('/v4/{}/{}'.format(self.schema.Meta.api_type, v.id))
                        r_value.append('/{}/{}'.format(self.schema.Meta.url, v.id))
                return r_value
            else:
                if self.__microservice_aware:
                    return '/{}/{}'.format(self.schema.Meta.url, value.id)
                else:
                    return '/v4/{}/{}'.format(self.schema.Meta.api_type, value.id)
        else:
            if self.many:
                return []
            else:
                return None
    def _deserialize(self, value, attr, data, **kwargs):
        if self.many:
            deserialized = []
            for v in value:
                if isinstance(v, str):
                    deserialized.append(int(v.split('/')[-1]))
                elif isinstance(v, dict):
                    deserialized.append(self.schema().load(v))
            return deserialized
        else:
            return self.schema().load(value)
```

Quality signals: num_words 503, num_chars 4,858, mean_word_length 4.908549, frac_words_unique 0.135189, top_2grams 0.198461, top_3grams 0.105306, top_4grams 0.089105, dupe_5grams 0.793034, dupe_6grams 0.782503, dupe_7grams 0.779263, dupe_8grams 0.767517, dupe_9grams 0.767517, dupe_10grams 0.767517, replacement_symbols 0, digital 0.00317, whitespace 0.350556, size_file_byte 4,858, num_lines 126, line_max 114, line_mean 38.555556, alphabet 0.779398, comments 0, xml_start 0, dupe_lines 0.789474, autogen 0, long_string 0, string_length 0.038699, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0.070175, var_zero false, pass 0.017544, import 0.017544, simplefunc 0, no_logic 0.27193, print 0.

Hit flags (1): dupe_6grams, dupe_7grams, dupe_8grams, dupe_9grams, dupe_10grams, dupe_lines. effective 0, hits 6.
**Row 18**

| field | value |
|---|---|
| hexsha | 493ff6a4ece23b1832f3e7baa3d75f2ce1895cf2 |
| size / ext / lang | 26 / py / Python |
| max_stars | make_var/__init__.py · karnigen/make_var @ 536be7107099830facaa0835bed2331778fc9e94 · ["MIT"] · count null |
| max_issues | same path / repo / head / licenses · count null |
| max_forks | same path / repo / head / licenses · count 1 · 2022-02-01T12:57:57.000Z → 2022-02-01T12:57:57.000Z |
| avg_line_length / max_line_length / alphanum_fraction | 6.5 / 23 / 0.692308 |

content:

```python
from .make_var import *
```

Quality signals: num_words 4, num_chars 26, mean_word_length 4.25, frac_words_unique 1, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.230769, size_file_byte 26, num_lines 3, line_max 24, line_mean 8.666667, alphabet 0.85, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0, import 1, simplefunc 0, no_logic 1, print 0.

Hit flags (1): num_words, num_chars, num_lines, var_zero, import, no_logic. effective 0, hits 6.
**Row 19**

| field | value |
|---|---|
| hexsha | 494b81a2fa9658b2822d06afd8dfb953051081e9 |
| size / ext / lang | 21 / py / Python |
| max_stars | tact-random/__init__.py · tactlabs/tactrandom @ ff52d19eefe7c10ed17442e845f78397c8149517 · ["MIT"] · count 2 · 2019-03-21T07:14:19.000Z → 2020-06-23T12:53:15.000Z |
| max_issues | Lib/site-packages/numjy/random/__init__.py · Yaqiang/jythonlab @ d031d85e5bd5f19943c6a410c56ceb734c533534 · ["CNRI-Jython", "Apache-2.0"] · count null |
| max_forks | same path / repo / head / licenses as max_issues · count null |
| avg_line_length / max_line_length / alphanum_fraction | 21 / 21 / 0.761905 |

content:

```python
from .random import *
```

Quality signals: num_words 3, num_chars 21, mean_word_length 5.333333, frac_words_unique 1, top_2grams 0, top_3grams 0, top_4grams 0, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.142857, size_file_byte 21, num_lines 1, line_max 21, line_mean 21, alphabet 0.888889, comments 0, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0, import 1, simplefunc 0, no_logic 1, print 0.

Hit flags (1): num_words, num_chars, num_lines, var_zero, import, no_logic. effective 0, hits 6.
**Row 20**

| field | value |
|---|---|
| hexsha | 49917025b5391af044654bc61bf168869c1ed2e5 |
| size / ext / lang | 926 / py / Python |
| max_stars | core/admin.py · hackforthesea/hackforthesea.tech @ 33b7522c13d87b26a39e9dfbdcad4067b44cda06 · ["BSD-3-Clause"] · count 1 · 2018-09-17T04:35:06.000Z → 2018-09-17T04:35:06.000Z |
| max_issues | same path / repo / head / licenses · count 5 · 2021-04-08T18:28:06.000Z → 2022-02-10T08:24:03.000Z |
| max_forks | same path / repo / head / licenses · count 1 · 2018-09-17T04:35:08.000Z → 2018-09-17T04:35:08.000Z |
| avg_line_length / max_line_length / alphanum_fraction | 22.585366 / 79 / 0.774298 |

content:

```python
from django.contrib import admin
# from .models import Sponsor, CommunityPartner, Location, Team, Participant, \
# Submission, FrequentlyAskedQuestion
# class SponsorAdmin(admin.ModelAdmin):
# pass
# class CommunityPartnerAdmin(admin.ModelAdmin):
# pass
# class LocationAdmin(admin.ModelAdmin):
# pass
# class SubmissionAdmin(admin.ModelAdmin):
# pass
# class TeamAdmin(admin.ModelAdmin):
# pass
# class ParticipantAdmin(admin.ModelAdmin):
# pass
# class FrequentlyAskedQuestionAdmin(admin.ModelAdmin):
# pass
# admin.site.register(Sponsor,SponsorAdmin)
# admin.site.register(CommunityPartner,CommunityPartnerAdmin)
# admin.site.register(Location,LocationAdmin)
# admin.site.register(Participant,ParticipantAdmin)
# admin.site.register(Team,TeamAdmin)
# admin.site.register(Submission,SubmissionAdmin)
# admin.site.register(FrequentlyAskedQuestion,FrequentlyAskedQuestionAdmin)
```

Quality signals: num_words 85, num_chars 926, mean_word_length 8.435294, frac_words_unique 0.294118, top_2grams 0.146444, top_3grams 0.185495, top_4grams 0.200837, dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, replacement_symbols 0, digital 0, whitespace 0.12311, size_file_byte 926, num_lines 41, line_max 80, line_mean 22.585366, alphabet 0.883005, comments 0.896328, xml_start 0, dupe_lines 0, autogen 0, long_string 0, string_length 0, long_word_length 0, string_concat 0, encoded_data 0, hex_words 0, prompt_comments 0, assert 0; python: cate_ast 1, func_ratio 0, var_zero true, pass 0, import 1, simplefunc 0, no_logic 1, print 0.

Hit flags (1): top_3grams, top_4grams, comments, var_zero, import, no_logic. effective 0, hits 6.
**Row 21**

| field | value |
|---|---|
| hexsha | 4993f6406a9a73a3ef0b60cd7b6145884beaf326 |
| size / ext / lang | 16,925 / py / Python |
| max_stars | koko/lib/dottext.py · TheBeachLab/kokopelli @ 529b8149a951363d2a027946464ea0bb22346428 · ["MIT"] · count null |
| max_issues | same path / repo / head / licenses · count 1 · 2018-11-23T11:52:41.000Z → 2018-11-23T11:52:41.000Z |
| max_forks | same path / repo / head / licenses · count null |

content:

```python
# koko.lib.dottext.py
# Simple dot matrix math-string based font.
# Modified by Francisco Sanchez
# original code by Matt Keeter
# matt.keeter@cba.mit.edu
# kokompe.cba.mit.edu
################################################################################
from koko.lib.shapes2d import *
def text(text, x, y, height = 1, align = 'CC'):
    dx, dy = 0, -1
    text_shape = None
    for line in text.split('\n'):
        line_shape = None
        for c in line:
            if not c in _glyphs.keys():
                print 'Warning: Unknown character "%s" in koko.lib.text' % c
            else:
                chr_math = move(_glyphs[c], dx, dy)
                if line_shape is None: line_shape = chr_math
                else: line_shape += chr_math
                dx += _glyphs[c].width + 0.1
        dx -= 0.1
        if line_shape is not None:
            if align[0] == 'L':
                pass
            elif align[0] == 'C':
                line_shape = move(line_shape, -dx / 2, 0)
            elif align[0] == 'R':
                line_shape = move(line_shape, -dx, 0)
            text_shape += line_shape
        dy -= 1.55
        dx = 0
    dy += 1.55
    if text_shape is None: return None
    if align[1] == 'T':
        pass
    elif align[1] == 'B':
        text_shape = move(text_shape, 0, -dy,)
    elif align[1] == 'C':
        text_shape = move(text_shape, 0, -dy/2)
    if height != 1:
        text_shape = scale_xy(text_shape, 0, 0, height)
        dx *= height
        dy *= height
    return move(text_shape, x, y)
_glyphs = {}
shape = triangle(0, 0, 0.35, 1, 0.1, 0)
shape += triangle(0.1, 0, 0.35, 1, 0.45, 1)
shape += triangle(0.35, 1, 0.45, 1, 0.8, 0)
shape += triangle(0.7, 0, 0.35, 1, 0.8, 0)
shape += rectangle(0.2, 0.6, 0.3, 0.4)
shape.width = 0.8
_glyphs['A'] = shape
shape = circle(0.25, 0.275, 0.275)
shape -= circle(0.25, 0.275, 0.175)
shape = shear_x_y(shape, 0, 0.35, 0, 0.1)
shape += rectangle(0.51, 0.61, 0, 0.35)
shape = move(shape, -0.05, 0)
shape.width = 0.58
_glyphs['a'] = shape
shape = circle(0.3, 0.725, 0.275)
shape -= circle(0.3, 0.725, 0.175)
shape += circle(0.3, 0.275, 0.275)
shape -= circle(0.3, 0.275, 0.175)
shape &= rectangle(0.3, 1, 0, 1)
shape += rectangle(0, 0.1, 0, 1)
shape += rectangle(0.1, 0.3, 0, 0.1)
shape += rectangle(0.1, 0.3, 0.45, 0.55)
shape += rectangle(0.1, 0.3, 0.9, 1)
shape.width = 0.575
_glyphs['B'] = shape
shape = circle(0.25, 0.275, 0.275)
shape -= circle(0.25, 0.275, 0.175)
shape &= rectangle(0.25, 1, 0, 0.275) + rectangle(0, 1, 0.275, 1)
shape += rectangle(0, 0.1, 0, 1)
shape += rectangle(0.1, 0.25, 0, 0.1)
shape.width = 0.525
_glyphs['b'] = shape
shape = circle(0.3, 0.7, 0.3) - circle(0.3, 0.7, 0.2)
shape += circle(0.3, 0.3, 0.3) - circle(0.3, 0.3, 0.2)
shape -= rectangle(0, 0.6, 0.3, 0.7)
shape -= triangle(0.3, 0.5, 1, 1.5, 1, -0.5)
shape -= rectangle(0.3, 0.6, 0.2, 0.8)
shape += rectangle(0, 0.1, 0.3, 0.7)
shape.width = 0.57
_glyphs['C'] = shape
shape = circle(0.275, 0.275, 0.275)
shape -= circle(0.275, 0.275, 0.175)
shape -= triangle(0.275, 0.275, 0.55, 0.55, 0.55, 0)
shape.width = 0.48
_glyphs['c'] = shape
shape = circle(0.1, 0.5, 0.5) - circle(0.1, 0.5, 0.4)
shape &= rectangle(0, 1, 0, 1)
shape += rectangle(0, 0.1, 0, 1)
shape.width = 0.6
_glyphs['D'] = shape
shape = reflect_x(_glyphs['b'], _glyphs['b'].width/2)
shape.width = _glyphs['b'].width
_glyphs['d'] = shape
shape = rectangle(0, 0.1, 0, 1)
shape += rectangle(0.1, 0.6, 0.9, 1)
shape += rectangle(0.1, 0.6, 0, 0.1)
shape += rectangle(0.1, 0.5, 0.45, 0.55)
shape.width = 0.6
_glyphs['E'] = shape
shape = circle(0.275, 0.275, 0.275)
shape -= circle(0.275, 0.275, 0.175)
shape -= triangle(0.1, 0.275, 0.75, 0.275, 0.6, 0)
shape += rectangle(0.05, 0.55, 0.225, 0.315)
shape &= circle(0.275, 0.275, 0.275)
shape.width = 0.55
_glyphs['e'] = shape
shape = rectangle(0, 0.1, 0, 1)
shape += rectangle(0.1, 0.6, 0.9, 1)
shape += rectangle(0.1, 0.5, 0.45, 0.55)
shape.width = 0.6
_glyphs['F'] = shape
shape = circle(0.4, 0.75, 0.25) - circle(0.4, 0.75, 0.15)
shape &= rectangle(0, 0.4, 0.75, 1)
shape += rectangle(0, 0.4, 0.45, 0.55)
shape += rectangle(0.15, 0.25, 0, 0.75)
shape.width = 0.4
_glyphs['f'] = shape
shape = circle(0.275, -0.1, 0.275)
shape -= circle(0.275, -0.1, 0.175)
shape &= rectangle(0, 0.55, -0.375, -0.1)
shape += circle(0.275, 0.275, 0.275) - circle(0.275, 0.275, 0.175)
shape += rectangle(0.45, 0.55, -0.1, 0.55)
shape.width = 0.55
_glyphs['g'] = shape
shape = circle(0.3, 0.7, 0.3) - circle(0.3, 0.7, 0.2)
shape += circle(0.3, 0.3, 0.3) - circle(0.3, 0.3, 0.2)
shape -= rectangle(0, 0.6, 0.3, 0.7)
shape += rectangle(0, 0.1, 0.3, 0.7)
shape += rectangle(0.5, 0.6, 0.3, 0.4)
shape += rectangle(0.3, 0.6, 0.4, 0.5)
shape.width = 0.6
_glyphs['G'] = shape
shape = rectangle(0, 0.1, 0, 1)
shape += rectangle(0.5, 0.6, 0, 1)
shape += rectangle(0.1, 0.5, 0.45, 0.55)
shape.width = 0.6
_glyphs['H'] = shape
shape = circle(0.275, 0.275, 0.275)
shape -= circle(0.275, 0.275, 0.175)
shape &= rectangle(0, 0.55, 0.275, 0.55)
shape += rectangle(0, 0.1, 0, 1)
shape += rectangle(0.45, 0.55, 0, 0.275)
shape.width = 0.55
_glyphs['h'] = shape
shape = rectangle(0, 0.5, 0, 0.1)
shape += rectangle(0, 0.5, 0.9, 1)
shape += rectangle(0.2, 0.3, 0.1, 0.9)
shape.width = 0.5
_glyphs['I'] = shape
shape = rectangle(0.025, 0.125, 0, 0.55)
shape += circle(0.075, 0.7, 0.075)
shape.width = 0.15
_glyphs['i'] = shape
shape = circle(0.275, 0.275, 0.275)
shape -= circle(0.275, 0.275, 0.175)
shape &= rectangle(0, 0.55, 0, 0.275)
shape += rectangle(0.45, 0.55, 0.275, 1)
shape.width = 0.55
_glyphs['J'] = shape
shape = circle(0.0, -0.1, 0.275)
shape -= circle(0.0, -0.1, 0.175)
shape &= rectangle(0, 0.55, -0.375, -0.1)
shape += rectangle(0.175, 0.275, -0.1, 0.55)
shape += circle(0.225, 0.7, 0.075)
shape.width = 0.3
_glyphs['j'] = shape
shape = rectangle(0, 0.6, 0, 1)
shape -= triangle(0.1, 1, 0.5, 1, 0.1, 0.6)
shape -= triangle(0.5, 0, 0.1, 0, 0.1, 0.4)
shape -= triangle(0.6, 0.95, 0.6, 0.05, 0.18, 0.5)
shape.width = 0.6
_glyphs['K'] = shape
shape = rectangle(0, 0.5, 0, 1)
shape -= triangle(0.1, 1, 0.5, 1, 0.1, 0.45)
shape -= triangle(0.36, 0, 0.1, 0, 0.1, 0.25)
shape -= triangle(0.6, 1, 0.5, 0.0, 0.18, 0.35)
shape -= triangle(0.1, 1, 0.6, 1, 0.6, 0.5)
shape.width = 0.5
_glyphs['k'] = shape
shape = rectangle(0, 0.6, 0, 0.1)
shape += rectangle(0, 0.1, 0, 1)
shape.width = 0.6
_glyphs['L'] = shape
shape = rectangle(0.025, 0.125, 0, 1)
shape.width = 0.15
_glyphs['l'] = shape
shape = rectangle(0, 0.1, 0, 1)
shape += rectangle(0.7, 0.8, 0, 1)
shape += triangle(0, 1, 0.1, 1, 0.45, 0)
shape += triangle(0.45, 0, 0.35, 0, 0, 1)
shape += triangle(0.7, 1, 0.8, 1, 0.35, 0)
shape += triangle(0.35, 0, 0.8, 1, 0.45, 0)
shape.width = 0.8
_glyphs['M'] = shape
shape = circle(0.175, 0.35, 0.175) - circle(0.175, 0.35, 0.075)
shape += circle(0.425, 0.35, 0.175) - circle(0.425, 0.35, 0.075)
shape &= rectangle(0, 0.65, 0.35, 0.65)
shape += rectangle(0, 0.1, 0, 0.525)
shape += rectangle(0.25, 0.35, 0, 0.35)
shape += rectangle(0.5, 0.6, 0, 0.35)
shape.width = 0.6
_glyphs['m'] = shape
shape = rectangle(0, 0.1, 0, 1)
shape += rectangle(0.5, 0.6, 0, 1)
shape += triangle(0, 1, 0.1, 1, 0.6, 0)
shape += triangle(0.6, 0, 0.5, 0, 0, 1)
shape.width = 0.6
_glyphs['N'] = shape
shape = circle(0.275, 0.275, 0.275)
shape -= circle(0.275, 0.275, 0.175)
shape &= rectangle(0, 0.55, 0.325, 0.55)
shape += rectangle(0, 0.1, 0, 0.55)
shape += rectangle(0.45, 0.55, 0, 0.325)
shape.width = 0.55
_glyphs['n'] = shape
shape = circle(0.3, 0.7, 0.3) - circle(0.3, 0.7, 0.2)
shape += circle(0.3, 0.3, 0.3) - circle(0.3, 0.3, 0.2)
shape -= rectangle(0, 0.6, 0.3, 0.7)
shape += rectangle(0, 0.1, 0.3, 0.7)
shape += rectangle(0.5, 0.6, 0.3, 0.7)
shape.width = 0.6
_glyphs['O'] = shape
shape = circle(0.275, 0.275, 0.275)
shape -= circle(0.275, 0.275, 0.175)
shape.width = 0.55
_glyphs['o'] = shape
shape = circle(0.3, 0.725, 0.275)
shape -= circle(0.3, 0.725, 0.175)
shape &= rectangle(0.3, 1, 0, 1)
shape += rectangle(0, 0.1, 0, 1)
shape += rectangle(0.1, 0.3, 0.45, 0.55)
shape += rectangle(0.1, 0.3, 0.9, 1)
shape.width = 0.575
_glyphs['P'] = shape
shape = circle(0.275, 0.275, 0.275)
shape -= circle(0.275, 0.275, 0.175)
shape += rectangle(0, 0.1, -0.375, 0.55)
shape.width = 0.55
_glyphs['p'] = shape
shape = circle(0.3, 0.7, 0.3) - circle(0.3, 0.7, 0.2)
shape += circle(0.3, 0.3, 0.3) - circle(0.3, 0.3, 0.2)
shape -= rectangle(0, 0.6, 0.3, 0.7)
shape += rectangle(0, 0.1, 0.3, 0.7)
shape += rectangle(0.5, 0.6, 0.3, 0.7)
shape += triangle(0.5, 0.1, 0.6, 0.1, 0.6, 0)
shape += triangle(0.5, 0.1, 0.5, 0.3, 0.6, 0.1)
shape.width = 0.6
_glyphs['Q'] = shape
shape = circle(0.275, 0.275, 0.275) - circle(0.275, 0.275, 0.175)
shape += rectangle(0.45, 0.55, -0.375, 0.55)
shape.width = 0.55
_glyphs['q'] = shape
shape = circle(0.3, 0.725, 0.275)
shape -= circle(0.3, 0.725, 0.175)
shape &= rectangle(0.3, 1, 0, 1)
shape += rectangle(0, 0.1, 0, 1)
shape += rectangle(0.1, 0.3, 0.45, 0.55)
shape += rectangle(0.1, 0.3, 0.9, 1)
shape += triangle(0.3, 0.5, 0.4, 0.5, 0.575, 0)
shape += triangle(0.475, 0.0, 0.3, 0.5, 0.575, 0)
shape.width = 0.575
_glyphs['R'] = shape
shape = circle(0.55, 0, 0.55) - scale_x(circle(0.55, 0, 0.45), 0.55, 0.8)
shape &= rectangle(0, 0.55, 0, 0.55)
shape = scale_x(shape, 0, 0.7)
shape += rectangle(0, 0.1, 0, 0.55)
shape.width = 0.385
_glyphs['r'] = shape
shape = circle(0.275, 0.725, 0.275)
shape -= circle(0.275, 0.725, 0.175)
shape -= rectangle(0.275, 0.55, 0.45, 0.725)
shape += reflect_x(reflect_y(shape, 0.5), .275)
shape.width = 0.55
_glyphs['S'] = shape
shape = circle(0.1625, 0.1625, 0.1625)
shape -= scale_x(circle(0.165, 0.165, 0.0625), 0.165, 1.5)
shape -= rectangle(0, 0.1625, 0.1625, 0.325)
shape += reflect_x(reflect_y(shape, 0.275), 0.1625)
shape = scale_x(shape, 0, 1.5)
shape.width = 0.4875
_glyphs['s'] = shape
shape = rectangle(0, 0.6, 0.9, 1) + rectangle(0.25, 0.35, 0, 0.9)
shape.width = 0.6
_glyphs['T'] = shape
shape = circle(0.4, 0.25, 0.25) - circle(0.4, 0.25, 0.15)
shape &= rectangle(0, 0.4, 0, 0.25)
shape += rectangle(0, 0.4, 0.55, 0.65)
shape += rectangle(0.15, 0.25, 0.25, 1)
shape.width = 0.4
_glyphs['t'] = shape
shape = circle(0.3, 0.3, 0.3) - circle(0.3, 0.3, 0.2)
shape &= rectangle(0, 0.6, 0, 0.3)
shape += rectangle(0, 0.1, 0.3, 1)
shape += rectangle(0.5, 0.6, 0.3, 1)
shape.width = 0.6
_glyphs['U'] = shape
shape = circle(0.275, 0.275, 0.275) - circle(0.275, 0.275, 0.175)
shape &= rectangle(0, 0.55, 0, 0.275)
shape += rectangle(0, 0.1, 0.275, 0.55)
shape += rectangle(0.45, 0.55, 0, 0.55)
shape.width = 0.55
_glyphs['u'] = shape
shape = triangle(0, 1, 0.1, 1, 0.35, 0)
shape += triangle(0.35, 0, 0.25, 0, 0, 1)
shape += reflect_x(shape, 0.3)
shape.width = 0.6
_glyphs['V'] = shape
shape = triangle(0, 0.55, 0.1, 0.55, 0.35, 0)
shape += triangle(0.35, 0, 0.25, 0, 0, 0.55)
shape += reflect_x(shape, 0.3)
shape.width = 0.6
_glyphs['v'] = shape
shape = triangle(0, 1, 0.1, 1, 0.25, 0)
shape += triangle(0.25, 0, 0.15, 0, 0, 1)
shape += triangle(0.15, 0, 0.35, 1, 0.45, 1)
shape += triangle(0.45, 1, 0.25, 0, 0.15, 0)
shape += reflect_x(shape, 0.4)
shape.width = 0.8
_glyphs['W'] = shape
shape = triangle(0, 0.55, 0.1, 0.55, 0.25, 0)
shape += triangle(0.25, 0, 0.15, 0, 0, 0.55)
shape += triangle(0.15, 0, 0.35, 0.5, 0.45, 0.5)
shape += triangle(0.45, 0.5, 0.25, 0, 0.15, 0)
shape += reflect_x(shape, 0.4)
shape.width = 0.8
_glyphs['w'] = shape
shape = triangle(0, 1, 0.125, 1, 0.8, 0)
shape += triangle(0.8, 0, 0.675, 0, 0, 1)
shape += reflect_x(shape, 0.4)
shape.width = 0.8
_glyphs['X'] = shape
shape = triangle(0, 0.55, 0.125, 0.55, 0.55, 0)
shape += triangle(0.55, 0, 0.425, 0, 0, 0.55)
shape += reflect_x(shape, 0.275)
shape.width = 0.55
_glyphs['x'] = shape
shape = triangle(0, 1, 0.1, 1, 0.45, 0.5)
shape += triangle(0.45, 0.5, 0.35, 0.5, 0, 1)
shape += reflect_x(shape, 0.4)
shape += rectangle(0.35, 0.45, 0, 0.5)
shape.width = 0.8
_glyphs['Y'] = shape
shape = triangle(0, 0.55, 0.1, 0.55, 0.325, 0)
shape += triangle(0.325, 0, 0.225, 0, 0, 0.55)
shape += reflect_x(shape, 0.275) + move(reflect_x(shape, 0.275), -0.225, -0.55)
shape &= rectangle(0, 0.55, -0.375, 0.55)
shape.width = 0.55
_glyphs['y'] = shape
shape = rectangle(0, 0.6, 0, 1)
shape -= triangle(0, 0.1, 0, 0.9, 0.45, 0.9)
shape -= triangle(0.6, 0.1, 0.15, 0.1, 0.6, 0.9)
shape.width = 0.6
_glyphs['Z'] = shape
shape = rectangle(0, 0.6, 0, 0.55)
shape -= triangle(0, 0.1, 0, 0.45, 0.45, 0.45)
shape -= triangle(0.6, 0.1, 0.15, 0.1, 0.6, 0.45)
shape.width = 0.6
_glyphs['z'] = shape
shape = MathTree.Constant(1)
shape.bounds = [0,0,0,0,None,None,None]
shape.shape = True
shape.width = 0.55
shape.xmin, shape.xmax = 0, 0.55
shape.ymin, shape.ymax = 0, 1
_glyphs[' '] = shape
shape = circle(0.075, 0.075, 0.075)
shape = scale_y(shape, 0.075, 3)
shape &= rectangle(0.0, 0.15, -0.15, 0.075)
shape -= triangle(0.075, 0.075, 0.0, -0.15, -0.5, 0.075)
shape += circle(0.1, 0.075, 0.075)
shape.width = 0.175
_glyphs[','] = shape
shape = circle(0.075, 0.075, 0.075)
shape.width = 0.15
_glyphs['.'] = shape
shape = rectangle(0, 0.1, 0.55, 0.8)
shape.width = 0.1
_glyphs["'"] = shape
shape = rectangle(0, 0.1, 0.55, 0.8) + rectangle(0.2, 0.3, 0.55, 0.8)
shape.width = 0.3
_glyphs['"'] = shape
shape = circle(0.075, 0.15, 0.075) + circle(0.075, 0.45, 0.075)
shape.width = 0.15
_glyphs[':'] = shape
shape = circle(0.075, 0.15, 0.075)
shape = scale_y(shape, 0.15, 3)
shape &= rectangle(0.0, 0.15, -0.075, 0.15)
shape -= triangle(0.075, 0.15, 0.0, -0.075, -0.5, 0.15)
shape += circle(0.075, 0.45, 0.075)
shape += circle(0.1, 0.15, 0.075)
shape.width = 0.15
_glyphs[';'] = shape
shape = rectangle(0.025, 0.125, 0.3, 1)
shape += circle(0.075, 0.075, 0.075)
shape.width = 0.1
_glyphs['!'] = shape
shape = rectangle(0.05, 0.4, 0.35, 0.45)
shape.width = 0.45
_glyphs['-'] = shape
shape = circle(0, 0.4, 0.6) - scale_x(circle(0, 0.4, 0.5), 0, 0.7)
shape &= rectangle(0, 0.6, -0.2, 1)
shape = scale_x(shape, 0, 1/2.)
shape.width = 0.3
_glyphs[')'] = shape
shape = circle(0.6, 0.4, 0.6) - scale_x(circle(0.6, 0.4, 0.5), 0.6, 0.7)
shape &= rectangle(0, 0.6, -0.2, 1)
shape = scale_x(shape, 0, 1/2.)
shape.width = 0.3
_glyphs['('] = shape
shape = rectangle(0, 0.3, 0, 1)
shape -= circle(0, 1, 0.2)
shape -= rectangle(0, 0.2, 0, 0.7)
shape.width = 0.3
_glyphs['1'] = shape
shape = circle(0.275, .725, .275)
shape -= circle(0.275, 0.725, 0.175)
shape -= rectangle(0, 0.55, 0, 0.725)
shape += rectangle(0, 0.55, 0, 0.1)
shape += triangle(0, 0.1, 0.45, 0.775, 0.55, 0.725)
shape += triangle(0, 0.1, 0.55, 0.725, 0.125, 0.1)
shape.width = 0.55
_glyphs['2'] = shape
shape = circle(0.3, 0.725, 0.275)
shape -= circle(0.3, 0.725, 0.175)
shape += circle(0.3, 0.275, 0.275)
shape -= circle(0.3, 0.275, 0.175)
shape -= rectangle(0, 0.275, 0.275, 0.725)
shape.width = 0.55
_glyphs['3'] = shape
shape = triangle(-0.10, 0.45, 0.4, 1, 0.4, 0.45)
shape += rectangle(0.4, 0.5, 0, 1)
shape -= triangle(0.4, 0.85, 0.4, 0.55, 0.1, 0.55)
shape &= rectangle(0, 0.5, 0, 1)
shape.width = 0.5
_glyphs['4'] = shape
shape = circle(0.325, 0.325, 0.325) - circle(0.325, 0.325, 0.225)
shape -= rectangle(0, 0.325, 0.325, 0.65)
shape += rectangle(0, 0.325, 0.55, 0.65)
shape += rectangle(0, 0.1, 0.55, 1)
shape += rectangle(0.1, 0.65, 0.9, 1)
shape.width = 0.65
_glyphs['5'] = shape
shape = circle(0.275, 0.725, 0.275) - scale_y(circle(0.275, 0.725, 0.175), .725, 1.2)
shape &= rectangle(0, 0.55, 0.725, 1)
shape -= triangle(0.275, 0.925, 0.55, 0.9, 0.55, 0.725)
shape = scale_y(shape, 1, 2)
shape = scale_x(shape, 0, 1.1)
shape -= rectangle(0.275, 0.65, 0., 0.7)
shape += rectangle(0, 0.1, 0.275, 0.45)
shape += circle(0.275, 0.275, 0.275) - circle(0.275, 0.275, 0.175)
shape.width = 0.55
_glyphs['6'] = shape
shape = rectangle(0, 0.6, 0.9, 1)
shape += triangle(0, 0, 0.475, 0.9, 0.6, 0.9)
shape += triangle(0, 0, 0.6, 0.9, 0.125, 0)
shape.width = 0.6
_glyphs['7'] = shape
shape = circle(0.3, 0.725, 0.275)
shape -= circle(0.3, 0.725, 0.175)
shape += circle(0.3, 0.275, 0.275)
shape -= circle(0.3, 0.275, 0.175)
shape.width = 0.55
_glyphs['8'] = shape
shape = reflect_x(reflect_y(_glyphs['6'], 0.5), _glyphs['6'].width/2)
shape.width = _glyphs['6'].width
_glyphs['9'] = shape
shape = circle(0.5, 0.5, 0.5) - scale_x(circle(0.5, 0.5, 0.4), 0.5, 0.7**0.5)
shape = scale_x(shape, 0, 0.7)
shape.width = 0.7
_glyphs['0'] = shape
shape = rectangle(0., 0.5, 0.45, 0.55)
shape += rectangle(0.2, 0.3, 0.25, 0.75)
shape.width = 0.55
_glyphs['+'] = shape
shape = triangle(0, 0, 0.425, 1, 0.55, 1)
shape += triangle(0, 0, 0.55, 1, 0.125, 0)
shape.width = 0.55
_glyphs['/'] = shape
shape = circle(0.275, 0.725, 0.275) - circle(0.275, 0.725, 0.175)
shape -= rectangle(0, 0.275, 0.45, 0.725)
shape += rectangle(0.225, 0.325, 0.3, 0.55)
shape += circle(0.275, 0.075, 0.075)
shape.width = 0.55
_glyphs['?'] = shape
del shape
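The glyph table above builds every character from boolean combinations of circle, rectangle, and triangle primitives, reflected and scaled into place, with each shape's width recorded for layout. A minimal sketch of how such a table might be consumed to typeset a string, assuming the move helper and the .width attribute used above; the render_text name and the spacing parameter are illustrative, not from the original source:

def render_text(s, spacing=0.1):
    # Walk the string, shifting each glyph to the current cursor
    # position and advancing by its width plus a fixed gap.
    out = None
    x = 0.0
    for ch in s:
        glyph = _glyphs[ch]
        placed = move(glyph, x, 0)   # translate glyph to the cursor
        out = placed if out is None else out + placed
        x += glyph.width + spacing
    return out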
| 25.643939
| 85
| 0.58257
| 3,585
| 16,925
| 2.71046
| 0.041004
| 0.036843
| 0.189873
| 0.113615
| 0.845837
| 0.748997
| 0.655346
| 0.528558
| 0.474118
| 0.430174
| 0
| 0.228326
| 0.181507
| 16,925
| 659
| 86
| 25.682853
| 0.473111
| 0.010281
| 0
| 0.359504
| 0
| 0
| 0.008462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.004132
| 0.002066
| null | null | 0.002066
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 49a2c5e5f75f28db4ece07e1c76aa61e3215e183
| 34
| py
| Python
| examples/math.lgamma/ex1.py
| mcorne/python-by-example
| 15339c0909c84b51075587a6a66391100971c033
| ["MIT"] | null | null | null
| examples/math.lgamma/ex1.py
| mcorne/python-by-example
| 15339c0909c84b51075587a6a66391100971c033
| ["MIT"] | null | null | null
| examples/math.lgamma/ex1.py
| mcorne/python-by-example
| 15339c0909c84b51075587a6a66391100971c033
| ["MIT"] | null | null | null
import math
print(math.lgamma(2))
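For reference, math.lgamma(x) returns the natural logarithm of the absolute value of the gamma function, so the example prints 0.0, since lgamma(2) = ln(Γ(2)) = ln(1!) = ln(1) = 0. A couple of extra hedged checks using only the standard library:

import math

print(math.lgamma(2))                # ln(Γ(2)) = ln(1) = 0.0
print(math.lgamma(5))                # ln(Γ(5)) = ln(24) ≈ 3.178
print(math.log(math.factorial(4)))   # same value, via Γ(n) = (n-1)!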
| 11.333333
| 21
| 0.764706
| 6
| 34
| 4.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.088235
| 34
| 2
| 22
| 17
| 0.806452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 6
| 772929c5030d8fc7d1b696f173eb6effdadd4ba5
| 60
| py
| Python
| emlearn/tools/__init__.py
| Brax94/emlearn
| cc5fd962f5af601c02dfe0ec9203d1b30e6b3aef
| ["MIT"] | 161
| 2019-03-12T16:07:20.000Z
| 2022-03-31T06:24:38.000Z
| emlearn/tools/__init__.py
| Brax94/emlearn
| cc5fd962f5af601c02dfe0ec9203d1b30e6b3aef
| ["MIT"] | 35
| 2019-05-14T11:34:04.000Z
| 2022-02-04T20:09:34.000Z
| emlearn/tools/__init__.py
| Brax94/emlearn
| cc5fd962f5af601c02dfe0ec9203d1b30e6b3aef
| ["MIT"] | 27
| 2019-03-11T01:09:27.000Z
| 2021-12-27T22:56:04.000Z
from . import mel_filterbank
from . import window_function
| 15
| 29
| 0.816667
| 8
| 60
| 5.875
| 0.75
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 60
| 3
| 30
| 20
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 65b7e06b740affffb8bbe74670c4341e8a0bae11
| 46
| py
| Python
| federated_aggregations/__init__.py
| tf-encrypted/federated-aggregations
| b4ab7a15c2719d4119db7d9d609f8c06d9df8958
| ["Apache-2.0"] | 16
| 2020-08-07T05:40:09.000Z
| 2022-01-08T20:32:07.000Z
| federated_aggregations/__init__.py
| tf-encrypted/federated-aggregations
| b4ab7a15c2719d4119db7d9d609f8c06d9df8958
| ["Apache-2.0"] | 1
| 2020-10-14T00:18:39.000Z
| 2020-10-19T14:13:03.000Z
| federated_aggregations/__init__.py
| tf-encrypted/federated-aggregations
| b4ab7a15c2719d4119db7d9d609f8c06d9df8958
| ["Apache-2.0"] | 2
| 2020-09-08T10:16:28.000Z
| 2021-01-14T12:33:01.000Z
from . import channels
from . import paillier
| 15.333333
| 22
| 0.782609
| 6
| 46
| 6
| 0.666667
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 2
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| 028f7931e2bad42389a82de6259959a1fa81c700
| 132
| py
| Python
| python/testData/inspections/PyAbstractClassInspection/quickFix/AddABCToSuperclasses/main_after.py
| jnthn/intellij-community
| 8fa7c8a3ace62400c838e0d5926a7be106aa8557
| ["Apache-2.0"] | 2
| 2019-04-28T07:48:50.000Z
| 2020-12-11T14:18:08.000Z
| python/testData/inspections/PyAbstractClassInspection/quickFix/AddABCToSuperclasses/main_after.py
| Cyril-lamirand/intellij-community
| 60ab6c61b82fc761dd68363eca7d9d69663cfa39
| ["Apache-2.0"] | 173
| 2018-07-05T13:59:39.000Z
| 2018-08-09T01:12:03.000Z
| python/testData/inspections/PyAbstractClassInspection/quickFix/AddABCToSuperclasses/main_after.py
| Cyril-lamirand/intellij-community
| 60ab6c61b82fc761dd68363eca7d9d69663cfa39
| ["Apache-2.0"] | 2
| 2020-03-15T08:57:37.000Z
| 2020-04-07T04:48:14.000Z
from abc import ABC
from PyAbstractClassInspection.quickFix.AddABCToSuperclasses.main_import import A1
class A2(A1, ABC):
pass
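The fixture above shows the expected result of an "add ABC to superclasses" quick fix: inheriting from abc.ABC marks A2 as intentionally abstract. A self-contained sketch of the behaviour this convention signals; the class and method names here are illustrative, not from the fixture:

from abc import ABC, abstractmethod

class Base(ABC):
    @abstractmethod
    def run(self): ...

class Still(Base, ABC):   # declares itself abstract; run() is not implemented
    pass

# Still() raises TypeError ("Can't instantiate abstract class ...")
# until a subclass overrides run().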
| 22
| 82
| 0.818182
| 17
| 132
| 6.294118
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026087
| 0.128788
| 132
| 6
| 83
| 22
| 0.904348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 6
| 02eaab6bf16c2c54a15d969b86eae1a7ce5c0db3
| 12,444
| py
| Python
| ppr-api/src/ppr_api/models/search_utils.py
| jordiwes/ppr
| 5e0cdf75036198b87a0f46a732c0e784e5f32259
| ["Apache-2.0"] | 4
| 2020-01-21T21:46:42.000Z
| 2021-02-24T18:30:24.000Z
| ppr-api/src/ppr_api/models/search_utils.py
| jordiwes/ppr
| 5e0cdf75036198b87a0f46a732c0e784e5f32259
| ["Apache-2.0"] | 1,313
| 2019-10-18T22:48:16.000Z
| 2022-03-30T17:42:47.000Z
| ppr-api/src/ppr_api/models/search_utils.py
| jordiwes/ppr
| 5e0cdf75036198b87a0f46a732c0e784e5f32259
| ["Apache-2.0"] | 201
| 2019-10-18T21:34:41.000Z
| 2022-03-31T20:07:42.000Z
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model helper utilities for processing search query and search detail requests.
Search constants and helper functions.
"""
# flake8: noqa Q000,E122,E131
# Disable Q000: Allow query strings to be in double quotation marks that contain single quotation marks.
# Disable E122: allow query strings to be more human readable.
# Disable E131: allow query strings to be more human readable.
GET_DETAIL_DAYS_LIMIT = 7 # Number of days in the past a get details request is allowed.
# Maximum number of days in the past to filter when fetching account search history: set to <= 0 to disable.
GET_HISTORY_DAYS_LIMIT = -1
# Account search history max result set size.
ACCOUNT_SEARCH_HISTORY_MAX_SIZE = 1000
# Maximum number or results returned by search.
SEARCH_RESULTS_MAX_SIZE = 1000
# Result set size limit clause
RESULTS_SIZE_LIMIT_CLAUSE = 'FETCH FIRST :max_results_size ROWS ONLY'
# Serial number search base where clause
SERIAL_SEARCH_BASE = """
SELECT r.registration_type,r.registration_ts AS base_registration_ts,
sc.serial_type,sc.serial_number,sc.year,sc.make,sc.model,
r.registration_number AS base_registration_num,
CASE WHEN serial_number = :query_value THEN 'EXACT' ELSE 'SIMILAR' END match_type,
fs.expire_date,fs.state_type,sc.id AS vehicle_id, sc.mhr_number
FROM registrations r, financing_statements fs, serial_collateral sc
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND sc.financing_id = fs.id
AND sc.registration_id_end IS NULL
"""
# Equivalent logic as DB view search_by_reg_num_vw, but API determines the where clause.
REG_NUM_QUERY = """
SELECT r2.registration_type, r2.registration_ts AS base_registration_ts,
r2.registration_number AS base_registration_num,
'EXACT' AS match_type, fs.state_type, fs.expire_date
FROM registrations r, financing_statements fs, registrations r2
WHERE r.financing_id = fs.id
AND r2.financing_id = fs.id
AND r2.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.registration_number = :query_value
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
"""
# Equivalent logic as DB view search_by_mhr_num_vw, but API determines the where clause.
MHR_NUM_QUERY = SERIAL_SEARCH_BASE + \
" AND sc.serial_type = 'MH' " + \
"AND sc.mhr_number = (SELECT searchkey_mhr(:query_value)) " + \
"ORDER BY match_type, r.registration_ts ASC " + RESULTS_SIZE_LIMIT_CLAUSE
# Equivalent logic as DB view search_by_serial_num_vw, but API determines the where clause.
SERIAL_NUM_QUERY = SERIAL_SEARCH_BASE + \
" AND sc.serial_type NOT IN ('AC', 'AF', 'AP') " + \
"AND sc.srch_vin = (SELECT searchkey_vehicle(:query_value)) " + \
"ORDER BY match_type, sc.serial_number " + RESULTS_SIZE_LIMIT_CLAUSE
# Equivalent logic as DB view search_by_aircraft_dot_vw, but API determines the where clause.
AIRCRAFT_DOT_QUERY = SERIAL_SEARCH_BASE + \
" AND sc.serial_type IN ('AC', 'AF', 'AP') " + \
"AND sc.srch_vin = (SELECT searchkey_aircraft(:query_value)) " + \
"ORDER BY match_type, sc.serial_number " + RESULTS_SIZE_LIMIT_CLAUSE
BUSINESS_NAME_QUERY = """
SELECT r.registration_type,r.registration_ts AS base_registration_ts,
p.business_name,
r.registration_number AS base_registration_num,
CASE WHEN p.business_name = :query_bus_name THEN 'EXACT' ELSE 'SIMILAR' END match_type,
fs.expire_date,fs.state_type,p.id
FROM registrations r, financing_statements fs, parties p
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND p.financing_id = fs.id
AND p.registration_id_end IS NULL
AND p.party_type = 'DB'
AND (SELECT searchkey_business_name(:query_bus_name)) <% p.business_srch_key
AND word_similarity(p.business_srch_key, (SELECT searchkey_business_name(:query_bus_name))) >= .60
ORDER BY match_type, p.business_name
""" + RESULTS_SIZE_LIMIT_CLAUSE
INDIVIDUAL_NAME_QUERY = """
SELECT r.registration_type,r.registration_ts AS base_registration_ts,
p.last_name,p.first_name,p.middle_initial,p.id,
r.registration_number AS base_registration_num,
CASE WHEN p.last_name = :query_last AND p.first_name = :query_first THEN 'EXACT' ELSE 'SIMILAR' END match_type,
fs.expire_date,fs.state_type, p.birth_date
FROM registrations r, financing_statements fs, parties p
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND p.financing_id = fs.id
AND p.registration_id_end IS NULL
AND p.party_type = 'DI'
AND p.id IN (SELECT * FROM unnest(match_individual_name(:query_last, :query_first)))
ORDER BY match_type, p.last_name, p.first_name
""" + RESULTS_SIZE_LIMIT_CLAUSE
INDIVIDUAL_NAME_MIDDLE_QUERY = """
SELECT r.registration_type,r.registration_ts AS base_registration_ts,
p.last_name,p.first_name,p.middle_initial,p.id,
r.registration_number AS base_registration_num,
CASE WHEN p.last_name = :query_last AND
p.first_name = :query_first AND
p.middle_initial = :query_middle THEN 'EXACT' ELSE 'SIMILAR' END match_type,
fs.expire_date,fs.state_type, p.birth_date
FROM registrations r, financing_statements fs, parties p
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND p.financing_id = fs.id
AND p.registration_id_end IS NULL
AND p.party_type = 'DI'
AND p.id IN (SELECT * FROM unnest(match_individual_name(:query_last, :query_first)))
ORDER BY match_type, p.last_name, p.first_name
""" + RESULTS_SIZE_LIMIT_CLAUSE
# Total result count queries for serial number, debtor name searches:
BUSINESS_NAME_TOTAL_COUNT = """
SELECT COUNT(r.id) AS query_count
FROM registrations r, financing_statements fs, parties p
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND p.financing_id = fs.id
AND p.registration_id_end IS NULL
AND p.party_type = 'DB'
AND (SELECT searchkey_business_name(:query_bus_name)) <% p.business_srch_key
AND word_similarity(p.business_srch_key, (SELECT searchkey_business_name(:query_bus_name))) >= .60
"""
INDIVIDUAL_NAME_TOTAL_COUNT = """
SELECT COUNT(r.id) AS query_count
FROM registrations r, financing_statements fs, parties p
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND p.financing_id = fs.id
AND p.registration_id_end IS NULL
AND p.party_type = 'DI'
AND p.id IN (SELECT * FROM unnest(match_individual_name(:query_last, :query_first)))
"""
SERIAL_SEARCH_COUNT_BASE = """
SELECT COUNT(r.id) AS query_count
FROM registrations r, financing_statements fs, serial_collateral sc
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND sc.financing_id = fs.id
AND sc.registration_id_end IS NULL
"""
MHR_NUM_TOTAL_COUNT = SERIAL_SEARCH_COUNT_BASE + \
" AND sc.serial_type = 'MH' " + \
"AND sc.mhr_number = searchkey_mhr(:query_value)"
SERIAL_NUM_TOTAL_COUNT = SERIAL_SEARCH_COUNT_BASE + \
" AND sc.serial_type NOT IN ('AC', 'AF') " + \
"AND sc.srch_vin = searchkey_vehicle(:query_value)"
AIRCRAFT_DOT_TOTAL_COUNT = SERIAL_SEARCH_COUNT_BASE + \
" AND sc.serial_type IN ('AC', 'AF') " + \
"AND sc.srch_vin = searchkey_aircraft(:query_value)"
COUNT_QUERY_FROM_SEARCH_TYPE = {
'AC': AIRCRAFT_DOT_TOTAL_COUNT,
'BS': BUSINESS_NAME_TOTAL_COUNT,
'IS': INDIVIDUAL_NAME_TOTAL_COUNT,
'MH': MHR_NUM_TOTAL_COUNT,
'SS': SERIAL_NUM_TOTAL_COUNT
}
ACCOUNT_SEARCH_HISTORY_DATE_QUERY = \
'SELECT sc.id, sc.search_ts, sc.api_criteria, sc.total_results_size, sc.returned_results_size,' + \
'sr.exact_match_count, sr.similar_match_count ' + \
'FROM search_requests sc, search_results sr ' + \
'WHERE sc.id = sr.search_id ' + \
"AND sc.account_id = '?' " + \
"AND sc.search_ts > ((now() at time zone 'utc') - interval '" + str(GET_HISTORY_DAYS_LIMIT) + " days') " + \
'ORDER BY sc.search_ts DESC ' + \
'FETCH FIRST ' + str(ACCOUNT_SEARCH_HISTORY_MAX_SIZE) + ' ROWS ONLY'
ACCOUNT_SEARCH_HISTORY_QUERY = \
'SELECT sc.id, sc.search_ts, sc.api_criteria, sc.total_results_size, sc.returned_results_size,' + \
'sr.exact_match_count, sr.similar_match_count ' + \
'FROM search_requests sc, search_results sr ' + \
'WHERE sc.id = sr.search_id ' + \
"AND sc.account_id = '?' " + \
'ORDER BY sc.search_ts DESC ' + \
'FETCH FIRST ' + str(ACCOUNT_SEARCH_HISTORY_MAX_SIZE) + ' ROWS ONLY'
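The constants above use SQLAlchemy-style named bind parameters (:query_value, :query_last, and so on) rather than string interpolation for user input; the two account-history queries at the end instead splice sizes in with str() concatenation and leave a literal '?' placeholder for the account id, presumably substituted elsewhere. A minimal sketch of how one of the parameterized constants might be executed, assuming a SQLAlchemy engine; the connection URL and the sample registration number are hypothetical:

from sqlalchemy import create_engine, text

engine = create_engine("postgresql://user:pass@localhost/ppr")  # hypothetical DSN
with engine.connect() as conn:
    rows = conn.execute(text(REG_NUM_QUERY),
                        {"query_value": "123456B"}).fetchall()  # hypothetical value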
| 48.609375
| 118
| 0.696078
| 1,858
| 12,444
| 4.419268
| 0.116254
| 0.016441
| 0.037998
| 0.043844
| 0.783583
| 0.772744
| 0.752892
| 0.724029
| 0.691755
| 0.671295
| 0
| 0.01218
| 0.208293
| 12,444
| 255
| 119
| 48.8
| 0.821153
| 0.135808
| 0
| 0.715686
| 0
| 0.053922
| 0.854478
| 0.188526
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| f301ed1f95018bbf5d43822d3c38f6118815c6fc
| 33
| py
| Python
| tmr_description/src/urdf_modification/__init__.py
| kentsai0319/tmr_ros1_dev
| 2953f3317d2fdac8b1fc79ffbe661d3b978eb658
| ["BSD-3-Clause"] | null | null | null
| tmr_description/src/urdf_modification/__init__.py
| kentsai0319/tmr_ros1_dev
| 2953f3317d2fdac8b1fc79ffbe661d3b978eb658
| ["BSD-3-Clause"] | null | null | null
| tmr_description/src/urdf_modification/__init__.py
| kentsai0319/tmr_ros1_dev
| 2953f3317d2fdac8b1fc79ffbe661d3b978eb658
| ["BSD-3-Clause"] | null | null | null
from .urdf_modification import *
| 16.5
| 32
| 0.818182
| 4
| 33
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| b87e84ea190a77d711f5373bbf7836621e8c7a01
| 195
| py
| Python
| data_sets/synthetic_review_prediction/article_0/__init__.py
| Octavian-ai/graph-node-categorizer
| 80fb2606ff2eebd0273cfaf756578330b6a87bb0
| ["MIT"] | 1
| 2017-12-15T19:36:12.000Z
| 2017-12-15T19:36:12.000Z
| data_sets/synthetic_review_prediction/article_0/__init__.py
| Octavian-ai/basic-graph-connection
| 80fb2606ff2eebd0273cfaf756578330b6a87bb0
| ["MIT"] | null | null | null
| data_sets/synthetic_review_prediction/article_0/__init__.py
| Octavian-ai/basic-graph-connection
| 80fb2606ff2eebd0273cfaf756578330b6a87bb0
| ["MIT"] | null | null | null
from .configure import DATASET_NAME, create_data_set_properties
from .generate import run as _run
def run(client):
print(DATASET_NAME)
return _run(client, create_data_set_properties())
| 24.375
| 63
| 0.794872
| 28
| 195
| 5.178571
| 0.571429
| 0.151724
| 0.17931
| 0.317241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138462
| 195
| 7
| 64
| 27.857143
| 0.863095
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0.2
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| b89868403fb5e4ba6860e43dae51747244f03277
| 94
| py
| Python
| refgenieserver/__init__.py
| databio/refgenies
| 2f554de08566da6faaf6a78e06dfcb80bb609219
| ["BSD-2-Clause"] | 5
| 2019-07-12T16:44:58.000Z
| 2020-02-03T06:04:03.000Z
| refgenieserver/__init__.py
| databio/refgenieserver
| 2f554de08566da6faaf6a78e06dfcb80bb609219
| ["BSD-2-Clause"] | 53
| 2019-05-24T16:43:17.000Z
| 2020-06-18T14:35:51.000Z
| refgenieserver/__init__.py
| databio/refgenie_server
| 2f554de08566da6faaf6a78e06dfcb80bb609219
| ["BSD-2-Clause"] | 1
| 2019-05-29T19:47:20.000Z
| 2019-05-29T19:47:20.000Z
from .const import *
from .helpers import *
from .main import *
from .server_builder import *
| 18.8
| 29
| 0.744681
| 13
| 94
| 5.307692
| 0.538462
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 94
| 4
| 30
| 23.5
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| b2233a032e807f3e832c02081f15d61e8b2bfdc1
| 44
| py
| Python
| code/Python/hello_world_my.py
| souvickroy02/Hello-World
| a25bea68b2a22e68c3dffb5824e36f3fce33a8d3
| ["MIT"] | 63
| 2019-09-30T16:16:19.000Z
| 2021-06-17T17:23:06.000Z
| code/Python/hello_world_my.py
| souvickroy02/Hello-World
| a25bea68b2a22e68c3dffb5824e36f3fce33a8d3
| ["MIT"] | 242
| 2019-09-30T14:07:06.000Z
| 2020-10-01T13:52:13.000Z
| code/Python/hello_world_my.py
| souvickroy02/Hello-World
| a25bea68b2a22e68c3dffb5824e36f3fce33a8d3
| ["MIT"] | 743
| 2019-09-30T13:58:42.000Z
| 2021-12-29T21:58:28.000Z
print("Hello World") # prints "Hello World"
| 22
| 43
| 0.704545
| 6
| 44
| 5.166667
| 0.666667
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 1
| 44
| 44
| 0.815789
| 0.454545
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 6
| b24c8eb72b047dfa1f0b814f732d1c9488a4e77b
| 8,920
| py
| Python
| ProyectoMN/app/main.py
| hakuruklis/Proyecto-Metodos-Numericos
| 1e67dcfd64f62bce9f898637635fa17e3a831dcb
| ["MIT"] | null | null | null
| ProyectoMN/app/main.py
| hakuruklis/Proyecto-Metodos-Numericos
| 1e67dcfd64f62bce9f898637635fa17e3a831dcb
| ["MIT"] | null | null | null
| ProyectoMN/app/main.py
| hakuruklis/Proyecto-Metodos-Numericos
| 1e67dcfd64f62bce9f898637635fa17e3a831dcb
| ["MIT"] | null | null | null
from flask import Flask
from flask import render_template
from flask import request
from flask import make_response
from flask import session
app = Flask(__name__)
app.secret_key = 'F12Zr47j\3yX R~X@H!jmM]Lwf/,?KT'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/iniciarTest', methods=['POST'])
def inicio():
if request.method == 'POST':
session['desde'] = float(request.form['desde'])
session['hasta'] = float(request.form['hasta'])
session['H']=float(request.form['N'])
if session['H'] == 2:
h=(session['hasta']-session['desde'])/session['H']
c11=session['desde']
c12=h+c11
c13=c12+h
respuesta = make_response(render_template('N2.html', c11=c11, c12=c12, c13=c13, h=h))
elif session['H'] == 3:
h=(session['hasta']-session['desde'])/session['H']
c11=session['desde']
c12=h+c11
c13=c12+h
c14=session['hasta']
respuesta = make_response(render_template('N3.html', c11=c11, c12=c12, c13=c13, c14=c14, h=h))
elif session['H']==4:
h = (session['hasta'] - session['desde']) / session['H']
c11 = session['desde']
c12 = h + c11
c13 = c12 + h
c14 = c13 + h
c15 = session['hasta']
respuesta = make_response(render_template('N4.html', c11=c11, c12=c12, c13=c13, c14=c14, c15=c15, h=h))
elif session['H']==5:
h = (session['hasta'] - session['desde']) / session['H']
c11 = session['desde']
c12 = h + c11
c13 = c12 + h
c14 = c13 + h
c15 = c14 + h
c16 = session['hasta']
respuesta = make_response(render_template('N5.html', c11=c11, c12=c12, c13=c13, c14=c14, c15=c15, c16=c16, h=h))
elif session['H']==6:
h = (session['hasta'] - session['desde']) / session['H']
c11 = session['desde']
c12 = h + c11
c13 = c12 + h
c14 = c13 + h
c15 = c14 + h
c16 = c15 + h
c17 = session['hasta']
respuesta = make_response(render_template('N6.html', c11=c11, c12=c12, c13=c13, c14=c14, c15=c15, c16=c16, c17=c17, h=h))
elif session['H']==7:
h = (session['hasta'] - session['desde']) / session['H']
c11 = session['desde']
c12 = h + c11
c13 = c12 + h
c14 = c13 + h
c15 = c14 + h
c16 = c15 + h
c17 = c16 + h
c18 = session['hasta']
respuesta = make_response(render_template('N7.html', c11=c11, c12=c12, c13=c13, c14=c14, c15=c15, c16=c16, c17=c17, c18=c18, h=h))
elif session['H']==8:
h = (session['hasta'] - session['desde']) / session['H']
c11 = session['desde']
c12 = h + c11
c13 = c12 + h
c14 = c13 + h
c15 = c14 + h
c16 = c15 + h
c17 = c16 + h
c18 = c17 + h
c19 = session['hasta']
respuesta = make_response(render_template('N8.html', c11=c11, c12=c12, c13=c13, c14=c14, c15=c15, c16=c16, c17=c17, c18=c18, c19=c19, h=h))
return respuesta
@app.route('/N2', methods=['POST'])
def N2():
valor21 = float(request.form['21'])
valor22 = float(request.form['22'])
valor23 = float(request.form['23'])
operacion=request.form['operacion']
if operacion == 'Trapecio':
resultado=((session['hasta']-session['desde'])/session['H'])*(valor21+(valor22*2)+valor23*2)
elif operacion == '1/3':
resultado=(((session['hasta']-session['desde'])/session['H'])*(1/3))*(valor21+(valor22*4)+valor23)
respuesta = make_response(render_template('Resultado.html', S=resultado))
return respuesta
@app.route('/N3', methods=['POST'])
def N3():
valor21 = float(request.form['21'])
valor22 = float(request.form['22'])
valor23 = float(request.form['23'])
valor24 = float(request.form['24'])
operacion=request.form['operacion']
if operacion == 'Trapecio':
resultado=((session['hasta']-session['desde'])/session['H'])*(0.5)*(valor21+(valor22*2)+(valor23*2)+valor24)
elif operacion == '3/8':
resultado=(((session['hasta']-session['desde'])/session['H'])*(3/8))*(valor21+(valor22*3)+(valor23*3)+valor24)
respuesta = make_response(render_template('Resultado.html', S=resultado))
return respuesta
@app.route('/N4', methods=['POST'])
def N4():
valor21 = float(request.form['21'])
valor22 = float(request.form['22'])
valor23 = float(request.form['23'])
valor24 = float(request.form['24'])
valor25 = float(request.form['25'])
operacion=request.form['operacion']
if operacion == 'Trapecio':
resultado=((session['hasta']-session['desde'])/session['H'])*(0.5)*(valor21+(valor22*2)+(valor23*2)+(valor24*2)+valor25)
elif operacion == '1/3':
resultado=(((session['hasta']-session['desde'])/session['H'])*(1/3))*(valor21+(valor23*4)+valor25)
respuesta = make_response(render_template('Resultado.html', S=resultado))
return respuesta
@app.route('/N5', methods=['POST'])
def N5():
valor21 = float(request.form['21'])
valor22 = float(request.form['22'])
valor23 = float(request.form['23'])
valor24 = float(request.form['24'])
valor25 = float(request.form['25'])
valor26 = float(request.form['26'])
operacion=request.form['operacion']
if operacion == 'Trapecio':
resultado=(((session['hasta']-session['desde'])/session['H'])*0.5)*(valor21+(valor22*2)+(valor23*2)+(valor24*2)+(valor25*2)+valor26)
elif operacion == '3/8':
resultado=(((session['hasta']-session['desde'])/session['H'])*(3/8))*(valor21+(valor22*3)+(valor23*3)+valor24)+(((session['hasta'] - session['desde']) / session['H']) * (1/3)) * (valor24 + (valor25 * 4) +valor26)
respuesta = make_response(render_template('Resultado.html', S=resultado))
return respuesta
@app.route('/N6', methods=['POST'])
def N6():
valor21 = float(request.form['21'])
valor22 = float(request.form['22'])
valor23 = float(request.form['23'])
valor24 = float(request.form['24'])
valor25 = float(request.form['25'])
valor26 = float(request.form['26'])
valor27 = float(request.form['27'])
operacion=request.form['operacion']
if operacion == 'Trapecio':
resultado=((session['hasta']-session['desde'])/session['H'])*(0.5)*(valor21+(valor22*2)+(valor23*2)+(valor24*2)+(valor25*2)+(valor26*2)+valor27)
elif operacion == '1/3':
resultado=(((session['hasta']-session['desde'])/2)*(1/3))*(valor21+(valor24*4)+valor27)
respuesta = make_response(render_template('Resultado.html', S=resultado))
return respuesta
@app.route('/N7', methods=['POST'])
def N7():
valor21 = float(request.form['21'])
valor22 = float(request.form['22'])
valor23 = float(request.form['23'])
valor24 = float(request.form['24'])
valor25 = float(request.form['25'])
valor26 = float(request.form['26'])
valor27 = float(request.form['27'])
valor28 = float(request.form['28'])
operacion = request.form['operacion']
if operacion == 'Trapecio':
resultado = (((session['hasta'] - session['desde']) / session['H']) * 0.5) * (
valor21 + (valor22 * 2) + (valor23 * 2) + (valor24 * 2) + (valor25 * 2) + (valor26*2) + (valor27*2) + valor28)
elif operacion == '3/8':
resultado = (((session['hasta'] - session['desde']) / session['H']) * (3 / 8)) * (
valor21 + (valor22 * 3) + (valor23 * 3) + (valor24 * 3) + (valor25 * 3) + (valor26*3) + (valor27*3) + valor28)
respuesta = make_response(render_template('Resultado.html', S=resultado))
return respuesta
@app.route('/N8', methods=['POST'])
def N8():
valor21 = float(request.form['21'])
valor22 = float(request.form['22'])
valor23 = float(request.form['23'])
valor24 = float(request.form['24'])
valor25 = float(request.form['25'])
valor26 = float(request.form['26'])
valor27 = float(request.form['27'])
valor28 = float(request.form['28'])
valor29 = float(request.form['29'])
operacion = request.form['operacion']
if operacion == 'Trapecio':
resultado = (((session['hasta'] - session['desde']) / session['H']) * 0.5) * (
valor21 + (valor22 * 2) + (valor23 * 2) + (valor24 * 2) + (valor25 * 2) + (valor26*2) + (valor27*2) + (valor28*2) + valor29)
elif operacion == '1/3':
resultado = (((session['hasta'] - session['desde']) / 2) * (1/3)) * (valor21 + (valor25 * 4) + valor29)
respuesta = make_response(render_template('Resultado.html', S=resultado))
return respuesta
@app.route('/Inicio', methods=['POST'])
def inicioo():
return render_template('index.html')
if __name__ == "__main__":
app.run(debug=True)
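Each route above repeats the same composite trapezoid and Simpson arithmetic with the interval count hard-coded per template. A hedged sketch of the equivalent composite trapezoid rule for any number of equally spaced samples; the function and argument names are illustrative, not part of the app:

def trapecio(valores, desde, hasta):
    # Composite trapezoid rule: h/2 * (f0 + 2*f1 + ... + 2*f(n-1) + fn),
    # with h derived from the interval and the number of samples.
    h = (hasta - desde) / (len(valores) - 1)
    return h * (valores[0] + 2 * sum(valores[1:-1]) + valores[-1]) / 2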
| 41.105991
| 220
| 0.576121
| 1,105
| 8,920
| 4.609955
| 0.082353
| 0.112289
| 0.141343
| 0.103651
| 0.857676
| 0.818414
| 0.818414
| 0.758736
| 0.752061
| 0.746565
| 0
| 0.107643
| 0.224103
| 8,920
| 216
| 221
| 41.296296
| 0.628377
| 0
| 0
| 0.637306
| 0
| 0
| 0.094731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051813
| false
| 0
| 0.025907
| 0.010363
| 0.129534
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| b2605a24626625fb91ca87d59aaf6bd768870172
| 137
| py
| Python
| fastccd_support_ioc/utils/setClocksBiasOn.py
| lbl-camera/fastccd_support_ioc
| 80b3820744e9aec7923af6adec0a66a0c51b2c21
| ["BSD-3-Clause"] | null | null | null
| fastccd_support_ioc/utils/setClocksBiasOn.py
| lbl-camera/fastccd_support_ioc
| 80b3820744e9aec7923af6adec0a66a0c51b2c21
| ["BSD-3-Clause"] | 1
| 2020-08-07T22:22:25.000Z
| 2020-08-07T22:22:25.000Z
| fastccd_support_ioc/utils/setClocksBiasOn.py
| lbl-camera/fastccd_support_ioc
| 80b3820744e9aec7923af6adec0a66a0c51b2c21
| ["BSD-3-Clause"] | 1
| 2021-02-08T22:06:05.000Z
| 2021-02-08T22:06:05.000Z
from fastccd_support_ioc.utils import cin_functions
cin_functions.WriteReg("8204", "0001", 1)
cin_functions.WriteReg("8205", "0009", 1)
| 27.4
| 51
| 0.781022
| 20
| 137
| 5.1
| 0.7
| 0.352941
| 0.392157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0.080292
| 137
| 4
| 52
| 34.25
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.116788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| b2967f09846ea808f68f13f98ee805a19a109e19
| 46
| py
| Python
| tests/test_get_general_info.py
| cemsinano/pykap
| b49b2c53d40aa27b68186fb8b595dd41f3c5a21b
| ["MIT"] | 2
| 2021-06-09T06:25:23.000Z
| 2022-02-14T06:42:54.000Z
| tests/test_get_general_info.py
| cemsinano/pykap
| b49b2c53d40aa27b68186fb8b595dd41f3c5a21b
| ["MIT"] | null | null | null
| tests/test_get_general_info.py
| cemsinano/pykap
| b49b2c53d40aa27b68186fb8b595dd41f3c5a21b
| ["MIT"] | null | null | null
def test_get_general_info():
assert False
| 15.333333
| 28
| 0.76087
| 7
| 46
| 4.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 2
| 29
| 23
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| b2a2ee690d6b7d667d9f8b1689af65175a4897b6
| 20
| py
| Python
| pddb/lib/__init__.py
| genwch/pddb
| d1f5ff9bfc29f1e9e4f3b6f53304b56224256f15
| ["MIT"] | null | null | null
| pddb/lib/__init__.py
| genwch/pddb
| d1f5ff9bfc29f1e9e4f3b6f53304b56224256f15
| ["MIT"] | null | null | null
| pddb/lib/__init__.py
| genwch/pddb
| d1f5ff9bfc29f1e9e4f3b6f53304b56224256f15
| ["MIT"] | null | null | null
from .pddb import *
| 10
| 19
| 0.7
| 3
| 20
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| b2aa405f204e7c276030998aa3215886f32f9963
| 24,382
| py
| Python
| wolf/flows/couplings/coupling.py
| andrecianflone/wolf
| 826bbedc58d4d29871110349356868066a3108e6
| ["Apache-2.0"] | 75
| 2020-03-31T22:21:04.000Z
| 2022-03-20T10:58:17.000Z
| wolf/flows/couplings/coupling.py
| andrecianflone/wolf
| 826bbedc58d4d29871110349356868066a3108e6
| ["Apache-2.0"] | 3
| 2021-02-03T07:07:14.000Z
| 2022-03-08T20:58:43.000Z
| wolf/flows/couplings/coupling.py
| andrecianflone/wolf
| 826bbedc58d4d29871110349356868066a3108e6
| ["Apache-2.0"] | 10
| 2020-04-27T05:31:44.000Z
| 2021-11-21T14:11:16.000Z
__author__ = 'max'
from overrides import overrides
from typing import Tuple, Dict
import torch
from wolf.flows.couplings.blocks import NICEConvBlock, MCFBlock, NICEMLPBlock
from wolf.flows.couplings.blocks import LocalLinearCondNet, GlobalLinearCondNet, GlobalAttnCondNet
from wolf.flows.flow import Flow
from wolf.flows.couplings.transform import Additive, Affine, NLSQ, ReLU, SymmELU
class NICE1d(Flow):
"""
NICE Flow for 1D data
"""
def __init__(self, in_features, hidden_features=None, inverse=False, split_type='continuous',
order='up', transform='affine', alpha=1.0, type='mlp', activation='elu'):
super(NICE1d, self).__init__(inverse)
self.in_features = in_features
self.factor = 2
assert split_type in ['continuous', 'skip']
assert in_features % self.factor == 0
assert order in ['up', 'down']
self.split_type = split_type
self.up = order == 'up'
if hidden_features is None:
hidden_features = min(8 * in_features, 512)
out_features = in_features // self.factor
in_features = in_features - out_features
self.z1_features = in_features if self.up else out_features
assert transform in ['additive', 'affine']
if transform == 'additive':
self.transform = Additive()
self.analytic_bwd = True
elif transform == 'affine':
self.transform = Affine(dim=-1, alpha=alpha)
self.analytic_bwd = True
out_features = out_features * 2
else:
raise ValueError('unknown transform: {}'.format(transform))
assert type in ['mlp']
if type == 'mlp':
self.net = NICEMLPBlock(in_features, out_features, hidden_features, activation)
def split(self, z):
split_dim = z.dim() - 1
split_type = self.split_type
dim = z.size(split_dim)
if split_type == 'continuous':
return z.split([self.z1_features, dim - self.z1_features], dim=split_dim)
elif split_type == 'skip':
idx1 = torch.tensor(list(range(0, dim, 2))).to(z.device)
idx2 = torch.tensor(list(range(1, dim, 2))).to(z.device)
z1 = z.index_select(split_dim, idx1)
z2 = z.index_select(split_dim, idx2)
return z1, z2
else:
raise ValueError('unknown split type: {}'.format(split_type))
def unsplit(self, z1, z2):
split_dim = z1.dim() - 1
split_type = self.split_type
if split_type == 'continuous':
return torch.cat([z1, z2], dim=split_dim)
elif split_type == 'skip':
z = torch.cat([z1, z2], dim=split_dim)
dim = z1.size(split_dim)
idx = torch.tensor([i // 2 if i % 2 == 0 else i // 2 + dim for i in range(dim * 2)]).to(z.device)
return z.index_select(split_dim, idx)
else:
raise ValueError('unknown split type: {}'.format(split_type))
def calc_params(self, z: torch.Tensor):
params = self.net(z)
return params
def init_net(self, z: torch.Tensor, init_scale=1.0):
params = self.net.init(z, init_scale=init_scale)
return params
@overrides
def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, in_channels, H, W]
Returns: out: Tensor , logdet: Tensor
out: [batch, in_channels, H, W], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
# [batch, length, in_channels]
z1, z2 = self.split(input)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
params = self.transform.calc_params(self.calc_params(z))
zp, logdet = self.transform.fwd(zp, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
@overrides
def backward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, in_channels, H, W]
Returns: out: Tensor , logdet: Tensor
out: [batch, in_channels, H, W], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
if self.analytic_bwd:
return self.backward_analytic(input)
else:
return self.backward_iterative(input)
def backward_analytic(self, z: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# [batch, length, in_channels]
z1, z2 = self.split(z)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
params = self.transform.calc_params(self.calc_params(z))
zp, logdet = self.transform.bwd(zp, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
def backward_iterative(self, z: torch.Tensor, maxIter=100) -> Tuple[torch.Tensor, torch.Tensor]:
# [batch, length, in_channels]
z1, z2 = self.split(z)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
params = self.transform.calc_params(self.calc_params(z))
zp_org = zp
eps = 1e-6
for iter in range(maxIter):
new_zp, logdet = self.transform.bwd(zp, params)
new_zp = zp_org - new_zp
diff = torch.abs(new_zp - zp).max().item()
zp = new_zp
if diff < eps:
break
_, logdet = self.transform.fwd(zp, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet * -1.0
@overrides
def init(self, data: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
with torch.no_grad():
# [batch, length, in_channels]
z1, z2 = self.split(data)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
params = self.transform.calc_params(self.init_net(z, init_scale=init_scale))
zp, logdet = self.transform.fwd(zp, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
@overrides
def extra_repr(self):
return 'inverse={}, in_features={}, split={}, order={}, factor={}, transform={}'.format(self.inverse, self.in_features,
self.split_type, 'up' if self.up else 'down',
self.factor, self.transform)
@classmethod
def from_params(cls, params: Dict) -> "NICE1d":
return NICE1d(**params)
class NICE2d(Flow):
"""
NICE Flow for 2D image data
"""
def __init__(self, in_channels, hidden_channels=None, h_channels=0, inverse=False,
split_type='continuous', order='up', factor=2, transform='affine', alpha=1.0,
type='conv', h_type=None, activation='relu', normalize=None, num_groups=None):
super(NICE2d, self).__init__(inverse)
self.in_channels = in_channels
self.factor = factor
assert split_type in ['continuous', 'skip']
if split_type == 'skip':
assert factor == 2
if in_channels % factor == 1:
split_type = 'continuous'
assert order in ['up', 'down']
self.split_type = split_type
self.up = order == 'up'
if hidden_channels is None:
hidden_channels = min(8 * in_channels, 512)
out_channels = in_channels // factor
in_channels = in_channels - out_channels
self.z1_channels = in_channels if self.up else out_channels
assert transform in ['additive', 'affine', 'relu', 'nlsq', 'symm_elu']
if transform == 'additive':
self.transform = Additive()
self.analytic_bwd = True
elif transform == 'affine':
self.transform = Affine(dim=1, alpha=alpha)
self.analytic_bwd = True
out_channels = out_channels * 2
elif transform == 'relu':
self.transform = ReLU(dim=1)
self.analytic_bwd = True
out_channels = out_channels * 2
elif transform == 'nlsq':
self.transform = NLSQ(dim=1)
self.analytic_bwd = True
out_channels = out_channels * 5
elif transform == 'symm_elu':
self.transform = SymmELU(dim=1)
self.analytic_bwd = False
out_channels = out_channels * 2
else:
raise ValueError('unknown transform: {}'.format(transform))
assert type in ['conv']
if type == 'conv':
self.net = NICEConvBlock(in_channels, out_channels, hidden_channels, activation,
normalize=normalize, num_groups=num_groups)
assert h_type in [None, 'local_linear', 'global_linear', 'global_attn']
if h_type is None:
assert h_channels == 0
self.h_net = None
elif h_type == 'local_linear':
self.h_net = LocalLinearCondNet(h_channels, hidden_channels, kernel_size=3)
elif h_type == 'global_linear':
self.h_net = GlobalLinearCondNet(h_channels, hidden_channels)
elif h_type == 'global_attn':
self.h_net = GlobalAttnCondNet(h_channels, in_channels, hidden_channels)
else:
raise ValueError('unknown conditional transform: {}'.format(h_type))
def split(self, z):
split_dim = 1
split_type = self.split_type
dim = z.size(split_dim)
if split_type == 'continuous':
return z.split([self.z1_channels, dim - self.z1_channels], dim=split_dim)
elif split_type == 'skip':
idx1 = torch.tensor(list(range(0, dim, 2))).to(z.device)
idx2 = torch.tensor(list(range(1, dim, 2))).to(z.device)
z1 = z.index_select(split_dim, idx1)
z2 = z.index_select(split_dim, idx2)
return z1, z2
else:
raise ValueError('unknown split type: {}'.format(split_type))
def unsplit(self, z1, z2):
split_dim = 1
split_type = self.split_type
if split_type == 'continuous':
return torch.cat([z1, z2], dim=split_dim)
elif split_type == 'skip':
z = torch.cat([z1, z2], dim=split_dim)
dim = z1.size(split_dim)
idx = torch.tensor([i // 2 if i % 2 == 0 else i // 2 + dim for i in range(dim * 2)]).to(z.device)
return z.index_select(split_dim, idx)
else:
raise ValueError('unknown split type: {}'.format(split_type))
def calc_params(self, z: torch.Tensor, h=None):
params = self.net(z, h=h)
return params
def init_net(self, z: torch.Tensor, h=None, init_scale=1.0):
params = self.net.init(z, h=h, init_scale=init_scale)
return params
@overrides
def forward(self, input: torch.Tensor, h=None) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, in_channels, H, W]
h: Tensor
conditional input (default: None)
Returns: out: Tensor , logdet: Tensor
out: [batch, in_channels, H, W], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
# [batch, length, in_channels]
z1, z2 = self.split(input)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
if self.h_net is not None:
h = self.h_net(h, x=z)
else:
h = None
params = self.transform.calc_params(self.calc_params(z, h=h))
zp, logdet = self.transform.fwd(zp, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
@overrides
def backward(self, input: torch.Tensor, h=None) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, in_channels, H, W]
h: Tensor
conditional input (default: None)
Returns: out: Tensor , logdet: Tensor
out: [batch, in_channels, H, W], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
if self.analytic_bwd:
return self.backward_analytic(input, h=h)
else:
return self.backward_iterative(input, h=h)
def backward_analytic(self, z: torch.Tensor, h=None) -> Tuple[torch.Tensor, torch.Tensor]:
# [batch, length, in_channels]
z1, z2 = self.split(z)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
if self.h_net is not None:
h = self.h_net(h, x=z)
else:
h = None
params = self.transform.calc_params(self.calc_params(z, h=h))
zp, logdet = self.transform.bwd(zp, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
def backward_iterative(self, z: torch.Tensor, h=None, maxIter=100) -> Tuple[torch.Tensor, torch.Tensor]:
# [batch, length, in_channels]
z1, z2 = self.split(z)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
if self.h_net is not None:
h = self.h_net(h, x=z)
else:
h = None
params = self.transform.calc_params(self.calc_params(z, h=h))
zp_org = zp
eps = 1e-6
for iter in range(maxIter):
new_zp, logdet = self.transform.bwd(zp, params)
new_zp = zp_org - new_zp
diff = torch.abs(new_zp - zp).max().item()
zp = new_zp
if diff < eps:
break
_, logdet = self.transform.fwd(zp, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet * -1.0
@overrides
def init(self, data: torch.Tensor, h=None, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
with torch.no_grad():
# [batch, length, in_channels]
z1, z2 = self.split(data)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
if self.h_net is not None:
h = self.h_net(h, x=z)
else:
h = None
params = self.transform.calc_params(self.init_net(z, h=h, init_scale=init_scale))
zp, logdet = self.transform.fwd(zp, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
@overrides
def extra_repr(self):
return 'inverse={}, in_channels={}, split={}, order={}, factor={}, transform={}'.format(self.inverse, self.in_channels,
self.split_type, 'up' if self.up else 'down',
self.factor, self.transform)
@classmethod
def from_params(cls, params: Dict) -> "NICE2d":
return NICE2d(**params)
class MaskedConvFlow(Flow):
"""
Masked Convolutional Flow
"""
def __init__(self, in_channels, kernel_size, hidden_channels=None, h_channels=None,
h_type=None, activation='relu', order='A', transform='affine', alpha=1.0, inverse=False):
super(MaskedConvFlow, self).__init__(inverse)
self.in_channels = in_channels
if hidden_channels is None:
if in_channels <= 96:
hidden_channels = 4 * in_channels
else:
hidden_channels = min(2 * in_channels, 512)
out_channels = in_channels
assert transform in ['additive', 'affine', 'relu', 'nlsq', 'symm_elu']
if transform == 'additive':
self.transform = Additive()
self.analytic_bwd = True
elif transform == 'affine':
self.transform = Affine(dim=1, alpha=alpha)
self.analytic_bwd = True
out_channels = out_channels * 2
elif transform == 'relu':
self.transform = ReLU(dim=1)
self.analytic_bwd = True
out_channels = out_channels * 2
elif transform == 'nlsq':
self.transform = NLSQ(dim=1)
self.analytic_bwd = True
out_channels = out_channels * 5
elif transform == 'symm_elu':
self.transform = SymmELU(dim=1)
self.analytic_bwd = False
out_channels = out_channels * 2
else:
raise ValueError('unknown transform: {}'.format(transform))
self.kernel_size = kernel_size
self.order = order
self.net = MCFBlock(in_channels, out_channels, kernel_size, hidden_channels, order, activation)
assert h_type in [None, 'local_linear', 'global_linear', 'global_attn']
if h_type is None:
assert h_channels is None or h_channels == 0
self.h_net = None
elif h_type == 'local_linear':
self.h_net = LocalLinearCondNet(h_channels, hidden_channels, kernel_size=3)
elif h_type == 'global_linear':
# TODO remove global linear
self.h_net = GlobalLinearCondNet(h_channels, hidden_channels)
elif h_type == 'global_attn':
# TODO add global attn
self.h_net = None
else:
raise ValueError('unknown conditional transform: {}'.format(h_type))
def calc_params(self, x: torch.Tensor, h=None, shifted=True):
params = self.net(x, h=h, shifted=shifted)
return params
def init_net(self, x, h=None, init_scale=1.0):
params = self.net.init(x, h=h, init_scale=init_scale)
return params
@overrides
def forward(self, input: torch.Tensor, h=None) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, in_channels, H, W]
h: Tensor
conditional input (default: None)
Returns: out: Tensor , logdet: Tensor
out: [batch, in_channels, H, W], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
if self.h_net is not None:
h = self.h_net(h)
else:
h = None
params = self.transform.calc_params(self.calc_params(input, h=h))
out, logdet = self.transform.fwd(input, params)
return out, logdet
@overrides
def backward(self, input: torch.Tensor, h=None) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, in_channels, H, W]
h: Tensor
conditional input (default: None)
Returns: out: Tensor , logdet: Tensor
out: [batch, in_channels, H, W], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
if self.analytic_bwd:
return self.backward_analytic(input, h=h)
else:
return self.backward_iterative(input, h=h)
def backward_analytic(self, z: torch.Tensor, h=None) -> Tuple[torch.Tensor, torch.Tensor]:
if self.h_net is not None:
bs, _, H, W = z.size()
h = self.h_net(h)
hh = h + h.new_zeros(bs, 1, H, W)
else:
h = hh = None
if self.order == 'A':
out = self.backward_height(z, hh=hh, reverse=False)
elif self.order == 'B':
out = self.backward_height(z, hh=hh, reverse=True)
elif self.order == 'C':
out = self.backward_width(z, hh=hh, reverse=False)
else:
out = self.backward_width(z, hh=hh, reverse=True)
params = self.transform.calc_params(self.calc_params(out, h=h))
_, logdet = self.transform.fwd(out, params)
return out, logdet.mul(-1.0)
def backward_iterative(self, z: torch.Tensor, h=None, maxIter=100) -> Tuple[torch.Tensor, torch.Tensor]:
if self.h_net is not None:
h = self.h_net(h)
else:
h = None
z_org = z
eps = 1e-6
for iter in range(maxIter):
params = self.transform.calc_params(self.calc_params(z, h=h))
new_z, logdet = self.transform.bwd(z, params)
new_z = z_org - new_z
diff = torch.abs(new_z - z).max().item()
z = new_z
if diff < eps:
break
params = self.transform.calc_params(self.calc_params(z, h=h))
z_recon, logdet = self.transform.fwd(z, params)
return z, logdet * -1.0
def backward_height(self, input: torch.Tensor, hh=None, reverse=False) -> torch.Tensor:
batch, channels, H, W = input.size()
kH, kW = self.kernel_size
cW = kW // 2
out = input.new_zeros(batch, channels, H + kH, W + 2 * cW)
itr = reversed(range(H)) if reverse else range(H)
for h in itr:
curr_h = h if reverse else h + kH
s_h = h + 1 if reverse else h
t_h = h + kH + 1 if reverse else h + kH
# [batch, channels, kH, width+2*cW]
out_curr = out[:, :, s_h:t_h]
hh_curr = None if hh is None else hh[:, :, h:h + 1]
# [batch, channels, width]
in_curr = input[:, :, h]
# [batch, channels, 1, width]
params = self.calc_params(out_curr, h=hh_curr, shifted=False)
params = self.transform.calc_params(params.squeeze(2))
# [batch, channels, width]
new_out, _ = self.transform.bwd(in_curr, params)
out[:, :, curr_h, cW:W + cW] = new_out
out = out[:, :, :H, cW:cW + W] if reverse else out[:, :, kH:, cW:cW + W]
return out
def backward_width(self, input: torch.Tensor, hh=None, reverse=False) -> torch.Tensor:
batch, channels, H, W = input.size()
kH, kW = self.kernel_size
cH = kH // 2
out = input.new_zeros(batch, channels, H + 2 * cH, W + kW)
itr = reversed(range(W)) if reverse else range(W)
for w in itr:
curr_w = w if reverse else w + kW
s_w = w + 1 if reverse else w
t_w = w + kW + 1 if reverse else w + kW
# [batch, channels, height+2*cH, kW]
out_curr = out[:, :, :, s_w:t_w]
hh_curr = None if hh is None else hh[:, :, :, w:w + 1]
# [batch, channels, height]
in_curr = input[:, :, :, w]
# [batch, channels, height, 1]
params = self.calc_params(out_curr, h=hh_curr, shifted=False)
params = self.transform.calc_params(params.squeeze(3))
# [batch, channels, height]
new_out, _ = self.transform.bwd(in_curr, params)
out[:, :, cH:H + cH, curr_w] = new_out
out = out[:, :, cH:cH + H, :W] if reverse else out[:, :, cH:cH + H, kW:]
return out
@overrides
def init(self, data, h=None, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
with torch.no_grad():
if self.h_net is not None:
h = self.h_net(h)
else:
h = None
params = self.transform.calc_params(self.init_net(data, h=h, init_scale=init_scale))
out, logdet = self.transform.fwd(data, params)
return out, logdet
@overrides
def extra_repr(self):
return 'inverse={}, in_channels={}, order={}, kernel={}, transform={}'.format(self.inverse, self.in_channels, self.order,
self.kernel_size, self.transform)
@classmethod
def from_params(cls, params: Dict) -> "MaskedConvFlow":
return MaskedConvFlow(**params)
NICE1d.register('nice1d')
NICE2d.register('nice2d')
MaskedConvFlow.register('masc')
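The module above registers three coupling flows by name. A minimal usage sketch of the NICE2d coupling defined here, assuming the surrounding wolf package and its blocks are importable; the tensor shape and tolerance are illustrative:

import torch

flow = NICE2d(in_channels=8, transform='affine', split_type='continuous')
x = torch.randn(4, 8, 16, 16)
y, logdet = flow.forward(x)        # transformed tensor plus per-sample log-determinant
x_back, _ = flow.backward(y)       # affine coupling has an analytic inverse
print(torch.allclose(x, x_back, atol=1e-5))  # expected: True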
[quality signals] 38.763116 | 141 | 0.558896 | 3,125 | 24,382 | 4.22496 | 0.06016 | 0.049155 | 0.014542 | 0.018178 | 0.821934 | 0.770734 | 0.74574 | 0.732637 | 0.702568 | 0.673256 | 0 | 0.01539 | 0.323107 | 24,382 | 628 | 142 | 38.824841 | 0.784598 | 0.104544 | 0 | 0.663677 | 0 | 0 | 0.045075 | 0 | 0 | 0 | 0 | 0.001592 | 0.033632 | 1 | 0.080717 | false | 0 | 0.015695 | 0.013453 | 0.192825 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
[record] a23490ff97b503e6685840388a20a84039860bb5 | 125 | py | Python | spiral/data/__init__.py | acdaniells/spiral | d78344007969d7c991216901b4a9d3ad7d768587 | ["BSD-3-Clause"] | null | null | null | spiral/data/__init__.py | acdaniells/spiral | d78344007969d7c991216901b4a9d3ad7d768587 | ["BSD-3-Clause"] | 1 | 2020-04-01T18:39:48.000Z | 2020-04-01T18:39:48.000Z | spiral/data/__init__.py | acdaniells/spiral | d78344007969d7c991216901b4a9d3ad7d768587 | ["BSD-3-Clause"] | 1 | 2020-04-01T18:36:44.000Z | 2020-04-01T18:36:44.000Z
"""
Spiral data subpackage.
"""
from ._core import list_datasets, load_dataset
__all__ = ["list_datasets", "load_dataset"]
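A short usage sketch for this subpackage; the return type of load_dataset is not specified by this file, so treat it as an assumption:

from spiral.data import list_datasets, load_dataset

available = list_datasets()
print(available)                    # discover which datasets ship with the package
data = load_dataset(available[0])   # load the first listed dataset (names depend on the install)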
[quality signals] 15.625 | 46 | 0.736 | 15 | 125 | 5.533333 | 0.733333 | 0.289157 | 0.385542 | 0.554217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.128 | 125 | 7 | 47 | 17.857143 | 0.761468 | 0.184 | 0 | 0 | 0 | 0 | 0.265957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6
[record] a240b5e9c363c1a88ea8bcf17217f918a1857e5d | 34 | py | Python | modules/geopy/geocoders/__init__.py | flavour/lacity | fd1f1cccdcea64d07143b29d4f88996e3af35c4b | ["MIT"] | 1 | 2016-01-01T12:22:48.000Z | 2016-01-01T12:22:48.000Z | modules/geopy/geocoders/__init__.py | andygimma/eden | 716d5e11ec0030493b582fa67d6f1c35de0af50d | ["MIT"] | null | null | null | modules/geopy/geocoders/__init__.py | andygimma/eden | 716d5e11ec0030493b582fa67d6f1c35de0af50d | ["MIT"] | 1 | 2020-04-29T13:58:31.000Z | 2020-04-29T13:58:31.000Z
from geopy.geocoders_old import *
[quality signals] 17 | 33 | 0.823529 | 5 | 34 | 5.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 34 | 1 | 34 | 34 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
[record] a255bb6e17c2034b50e2fced42c41c41dc3184e4 | 30,204 | py | Python | postprocessing/pyplotgen/config/VariableGroupNondimMoments.py | larson-group/clubb_release | b4d671e3e238dbe00752c0dead6a0d4f9897350a | ["Intel", "Unlicense", "NetCDF"] | null | null | null | postprocessing/pyplotgen/config/VariableGroupNondimMoments.py | larson-group/clubb_release | b4d671e3e238dbe00752c0dead6a0d4f9897350a | ["Intel", "Unlicense", "NetCDF"] | null | null | null | postprocessing/pyplotgen/config/VariableGroupNondimMoments.py | larson-group/clubb_release | b4d671e3e238dbe00752c0dead6a0d4f9897350a | ["Intel", "Unlicense", "NetCDF"] | 1 | 2022-01-28T22:22:04.000Z | 2022-01-28T22:22:04.000Z
"""
"""
import numpy as np
from netCDF4 import Dataset
from src.Panel import Panel
from src.VariableGroup import VariableGroup
class VariableGroupNondimMoments(VariableGroup):
"""
This class contains information for plotting normalized moments,
such as correlations or kurtosis.
"""
def __init__(self, case, clubb_datasets=None, sam_benchmark_dataset=None, coamps_benchmark_dataset=None,
wrf_benchmark_dataset=None, r408_dataset=None,
hoc_dataset=None, cam_datasets=None,
e3sm_datasets=None, sam_datasets=None, wrf_datasets=None, priority_vars=False):
"""
:param clubb_datasets:
:param case:
:param sam_benchmark_dataset:
"""
self.name = "normalized moments"
self.variable_definitions = [
{'var_names':
{
'clubb': [self.get_kurtosis_clubb],
'sam': [self.get_kurtosis_sam],
'coamps': [''],
'r408': [''],
'hoc': [''],
'e3sm': [''],
'cam': [''],
'wrf': [''],
},
'sci_scale': 0,
'priority': False,
'title': r'kurtosis, wp4/(wp2**2)',
'axis_title': r'kurtosis [-]',
},
{'var_names':
{
'clubb': [self.get_wpthlp_corr_clubb],
'sam': [self.get_wpthlp_corr_sam],
'coamps': [''],
'r408': [''],
'hoc': [''],
'e3sm': [''],
'cam': [''],
'wrf': [''],
},
'sci_scale': 0,
'priority': False,
'title': r'Correlation of w and thetal, wpthlp/sqrt(wp2*thlp2)',
'axis_title': r'Correlation of w and thetal [-]',
},
{'var_names':
{
'clubb': [self.get_wprtp_corr_clubb],
'sam': [self.get_wprtp_corr_sam],
'coamps': [''],
'r408': [''],
'hoc': [''],
'e3sm': [''],
'cam': [''],
'wrf': [''],
},
'sci_scale': 0,
'priority': False,
'title': r'Correlation of w and rt, wprtp/sqrt(wp2*rtp2)',
'axis_title': r'Correlation of w and rt [-]',
},
{'var_names':
{
'clubb': [self.get_wprcp_corr_clubb],
'sam': [self.get_wprcp_corr_sam],
'coamps': [''],
'r408': [''],
'hoc': [''],
'e3sm': [''],
'cam': [''],
'wrf': [''],
},
'sci_scale': 0,
'priority': False,
'title': r'Correlation of w and rc, wprcp/sqrt(wp2*rcp2)',
'axis_title': r'Correlation of w and rc [-]',
},
{'var_names':
{
'clubb': [self.get_upwp_corr_clubb],
'sam': [self.get_upwp_corr_sam],
'coamps': [''],
'r408': [''],
'hoc': [''],
'e3sm': [''],
'cam': [''],
'wrf': [''],
},
'sci_scale': 0,
'priority': False,
'title': r'Correlation of u and w, upwp/sqrt(up2*wp2)',
'axis_title': r'Correlation of u and w [-]',
},
{'var_names':
{
'clubb': [self.get_vpwp_corr_clubb],
'sam': [self.get_vpwp_corr_sam],
'coamps': [''],
'r408': [''],
'hoc': [''],
'e3sm': [''],
'cam': [''],
'wrf': [''],
},
'sci_scale': 0,
'priority': False,
'title': r'Correlation of v and w, vpwp/sqrt(vp2*wp2)',
'axis_title': r'Correlation of v and w [-]',
},
{'var_names':
{
'clubb': [self.get_nondim_wpthlp2_clubb],
'sam': [self.get_nondim_wpthlp2_sam],
'coamps': [''],
'r408': [''],
'hoc': [''],
'e3sm': [''],
'cam': [''],
'wrf': [''],
},
'sci_scale': 0,
'priority': False,
'title': r'Nondimensionalized wpthlp2, wpthlp2/(sqrt(wp2)*thlp2)',
'axis_title': r'Nondimensionalized wpthlp2 [-]',
},
{'var_names':
{
'clubb': [self.get_nondim_wprtp2_clubb],
'sam': [self.get_nondim_wprtp2_sam],
'coamps': [''],
'r408': [''],
'hoc': [''],
'e3sm': [''],
'cam': [''],
'wrf': [''],
},
'sci_scale': 0,
'priority': False,
'title': r'Nondimensionalized wprtp2, wprtp2/(sqrt(wp2)*rtp2)',
'axis_title': r'Nondimensionalized wprtp2 [-]',
},
{'var_names':
{
'clubb': [self.get_nondim_wp2thlp_clubb],
'sam': [self.get_nondim_wp2thlp_sam],
'coamps': [''],
'r408': [''],
'hoc': [''],
'e3sm': [''],
'cam': [''],
'wrf': [''],
},
'sci_scale': 0,
'priority': False,
'title': r'Nondimensionalized wp2thlp, wp2thlp/(wp2*sqrt(thlp2))',
'axis_title': r'Nondimensionalized wp2thlp [-]',
},
{'var_names':
{
'clubb': [self.get_nondim_wp2rtp_clubb],
'sam': [self.get_nondim_wp2rtp_sam],
'coamps': [''],
'r408': [''],
'hoc': [''],
'e3sm': [''],
'cam': [''],
'wrf': [''],
},
'sci_scale': 0,
'priority': False,
'title': r'Nondimensionalized wp2rtp, wp2rtp/(wp2*sqrt(rtp2))',
'axis_title': r'Nondimensionalized wp2rtp [-]',
},
]
# Call ctor of parent class
super().__init__(case, clubb_datasets=clubb_datasets, sam_datasets=sam_datasets, sam_benchmark_dataset=sam_benchmark_dataset,
coamps_benchmark_dataset=coamps_benchmark_dataset, wrf_benchmark_dataset=wrf_benchmark_dataset,
r408_dataset=r408_dataset, cam_datasets=cam_datasets,
hoc_dataset=hoc_dataset, e3sm_datasets=e3sm_datasets, wrf_datasets=wrf_datasets,
priority_vars=priority_vars)
def get_kurtosis_clubb(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zm']
wp4, indep, dataset = self.getVarForCalculations('wp4', dataset)
wp2, indep, dataset = self.getVarForCalculations('wp2', dataset)
kurtosis = wp4 / ( wp2 * wp2 )
return kurtosis, indep
def get_kurtosis_sam(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
dataset = self.sam_benchmark_dataset
if dataset_override is not None:
dataset = dataset_override
wp4, indep, dataset = self.getVarForCalculations('WP4', dataset)
wp2, indep, dataset = self.getVarForCalculations(['WP2', 'W2', 'wp2'], dataset)
kurtosis = wp4 / ( wp2 * wp2 )
return kurtosis, indep
def get_wpthlp_corr_clubb(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zm']
wpthlp, indep, dataset = self.getVarForCalculations('wpthlp', dataset)
wp2, indep, dataset = self.getVarForCalculations('wp2', dataset)
thlp2, indep, dataset = self.getVarForCalculations('thlp2', dataset)
wpthlp_corr = wpthlp / np.sqrt( wp2 * thlp2 )
return wpthlp_corr, indep
def get_wpthlp_corr_sam(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
dataset = self.sam_benchmark_dataset
if dataset_override is not None:
dataset = dataset_override
wp2, indep, dataset = self.getVarForCalculations(['WP2', 'W2', 'wp2'], dataset)
TL2, indep, dataset = self.getVarForCalculations('TL2', dataset)
THLP2_SGS, indep, dataset = self.getVarForCalculations('THLP2_SGS', dataset)
thlp2 = TL2 + THLP2_SGS
tlflux, indep, dataset = self.getVarForCalculations(['TLFLUX'], dataset)
rho, indep, dataset = self.getVarForCalculations(['RHO'], dataset)
wpthlp_sgs, indep, dataset = self.getVarForCalculations(['WPTHLP_SGS'], dataset)
wpthlp = (tlflux / (rho * 1004))
if not np.any(np.isnan(wpthlp_sgs)):
wpthlp += wpthlp_sgs
wpthlp_corr = wpthlp / np.sqrt( wp2 * thlp2 )
return wpthlp_corr, indep
def get_wprtp_corr_clubb(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zm']
wprtp, indep, dataset = self.getVarForCalculations('wprtp', dataset)
wp2, indep, dataset = self.getVarForCalculations('wp2', dataset)
rtp2, indep, dataset = self.getVarForCalculations('rtp2', dataset)
wprtp_corr = wprtp / np.sqrt( wp2 * rtp2 )
return wprtp_corr, indep
def get_wprtp_corr_sam(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
dataset = self.sam_benchmark_dataset
if dataset_override is not None:
dataset = dataset_override
wp2, indep, dataset = self.getVarForCalculations(['WP2', 'W2', 'wp2'], dataset)
QT2, indep, dataset = self.getVarForCalculations(['QT2'], dataset)
RTP2_SGS, indep, dataset = self.getVarForCalculations(['RTP2_SGS'], dataset)
rtp2 = (QT2 / 1e+6) + RTP2_SGS
qtflux, indep, dataset = self.getVarForCalculations(['QTFLUX'], dataset)
rho, indep, dataset = self.getVarForCalculations(['RHO'], dataset)
wprtp_sgs, indep, dataset = self.getVarForCalculations(['WPRTP_SGS'], dataset)
wprtp = qtflux / (rho * 2.5104e+6)
if not np.any(np.isnan(wprtp_sgs)):
wprtp += wprtp_sgs
wprtp_corr = wprtp / np.sqrt( wp2 * rtp2 )
return wprtp_corr, indep
def get_wprcp_corr_clubb(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zm']
wprcp, indep, dataset = self.getVarForCalculations('wprcp', dataset)
wp2, indep, dataset = self.getVarForCalculations('wp2', dataset)
rcp2, indep, dataset = self.getVarForCalculations('rcp2', dataset)
wprcp_corr = wprcp / np.sqrt( wp2 * rcp2 )
return wprcp_corr, indep
def get_wprcp_corr_sam(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
dataset = self.sam_benchmark_dataset
if dataset_override is not None:
dataset = dataset_override
wp2, indep, dataset = self.getVarForCalculations(['WP2', 'W2', 'wp2'], dataset)
rcp2, indep, dataset = self.getVarForCalculations(['RCP2'], dataset)
wprcp, indep, dataset = self.getVarForCalculations(['WPRCP'], dataset)
wprcp_corr = wprcp / np.sqrt( wp2 * rcp2 )
return wprcp_corr, indep
def get_upwp_corr_clubb(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zm']
upwp, indep, dataset = self.getVarForCalculations('upwp', dataset)
wp2, indep, dataset = self.getVarForCalculations('wp2', dataset)
up2, indep, dataset = self.getVarForCalculations('up2', dataset)
upwp_corr = upwp / np.sqrt( wp2 * up2 )
return upwp_corr, indep
def get_upwp_corr_sam(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
dataset = self.sam_benchmark_dataset
if dataset_override is not None:
dataset = dataset_override
wp2, indep, dataset = self.getVarForCalculations(['WP2', 'W2', 'wp2'], dataset)
U2, z, dataset = self.getVarForCalculations('U2', dataset)
UP2_SGS, z, dataset = self.getVarForCalculations('UP2_SGS', dataset)
up2 = UP2_SGS + U2
UW, z, dataset = self.getVarForCalculations('UW', dataset)
UPWP_SGS, z, dataset = self.getVarForCalculations('UPWP_SGS', dataset)
upwp = UW + UPWP_SGS
upwp_corr = upwp / np.sqrt( wp2 * up2 )
return upwp_corr, indep
def get_vpwp_corr_clubb(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zm']
vpwp, indep, dataset = self.getVarForCalculations('vpwp', dataset)
wp2, indep, dataset = self.getVarForCalculations('wp2', dataset)
vp2, indep, dataset = self.getVarForCalculations('vp2', dataset)
vpwp_corr = vpwp / np.sqrt( wp2 * vp2 )
return vpwp_corr, indep
def get_vpwp_corr_sam(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
dataset = self.sam_benchmark_dataset
if dataset_override is not None:
dataset = dataset_override
wp2, indep, dataset = self.getVarForCalculations(['WP2', 'W2', 'wp2'], dataset)
V2, z, dataset = self.getVarForCalculations('V2', dataset)
VP2_SGS, z, dataset = self.getVarForCalculations('VP2_SGS', dataset)
vp2 = V2 + VP2_SGS
VW, z, dataset = self.getVarForCalculations('VW', dataset)
VPWP_SGS, z, dataset = self.getVarForCalculations('VPWP_SGS', dataset)
vpwp = VW + VPWP_SGS
vpwp_corr = vpwp / np.sqrt( wp2 * vp2 )
return vpwp_corr, indep
def get_nondim_wpthlp2_clubb(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zt']
wpthlp2, indep, dataset = self.getVarForCalculations('wpthlp2', dataset)
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zm']
wp2, indep, dataset = self.getVarForCalculations('wp2', dataset)
thlp2, indep, dataset = self.getVarForCalculations('thlp2', dataset)
nondim_wpthlp2 = wpthlp2 / ( np.sqrt( wp2 ) * thlp2 )
return nondim_wpthlp2, indep
def get_nondim_wpthlp2_sam(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
dataset = self.sam_benchmark_dataset
if dataset_override is not None:
dataset = dataset_override
wp2, indep, dataset = self.getVarForCalculations(['WP2', 'W2', 'wp2'], dataset)
TL2, indep, dataset = self.getVarForCalculations('TL2', dataset)
THLP2_SGS, indep, dataset = self.getVarForCalculations('THLP2_SGS', dataset)
thlp2 = TL2 + THLP2_SGS
wpthlp2, indep, dataset = self.getVarForCalculations(['WPTHLP2'], dataset)
nondim_wpthlp2 = wpthlp2 / ( np.sqrt( wp2 ) * thlp2 )
return nondim_wpthlp2, indep
def get_nondim_wprtp2_clubb(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zt']
wprtp2, indep, dataset = self.getVarForCalculations('wprtp2', dataset)
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zm']
wp2, indep, dataset = self.getVarForCalculations('wp2', dataset)
rtp2, indep, dataset = self.getVarForCalculations('rtp2', dataset)
nondim_wprtp2 = wprtp2 / ( np.sqrt( wp2 ) * rtp2 )
return nondim_wprtp2, indep
def get_nondim_wprtp2_sam(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
dataset = self.sam_benchmark_dataset
if dataset_override is not None:
dataset = dataset_override
wp2, indep, dataset = self.getVarForCalculations(['WP2', 'W2', 'wp2'], dataset)
QT2, indep, dataset = self.getVarForCalculations(['QT2'], dataset)
RTP2_SGS, indep, dataset = self.getVarForCalculations(['RTP2_SGS'], dataset)
rtp2 = (QT2 / 1e+6) + RTP2_SGS
wprtp2, indep, dataset = self.getVarForCalculations(['WPRTP2'], dataset)
nondim_wprtp2 = wprtp2 / ( np.sqrt( wp2 ) * rtp2 )
return nondim_wprtp2, indep
def get_nondim_wp2thlp_clubb(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zt']
wp2thlp, indep, dataset = self.getVarForCalculations('wp2thlp', dataset)
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zm']
wp2, indep, dataset = self.getVarForCalculations('wp2', dataset)
thlp2, indep, dataset = self.getVarForCalculations('thlp2', dataset)
nondim_wp2thlp = wp2thlp / ( wp2 * np.sqrt( thlp2 ) )
return nondim_wp2thlp, indep
def get_nondim_wp2thlp_sam(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
dataset = self.sam_benchmark_dataset
if dataset_override is not None:
dataset = dataset_override
wp2, indep, dataset = self.getVarForCalculations(['WP2', 'W2', 'wp2'], dataset)
TL2, indep, dataset = self.getVarForCalculations('TL2', dataset)
THLP2_SGS, indep, dataset = self.getVarForCalculations('THLP2_SGS', dataset)
thlp2 = TL2 + THLP2_SGS
wp2thlp, indep, dataset = self.getVarForCalculations(['WP2THLP'], dataset)
nondim_wp2thlp = wp2thlp / ( wp2 * np.sqrt( thlp2 ) )
return nondim_wp2thlp, indep
def get_nondim_wp2rtp_clubb(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zt']
wp2rtp, indep, dataset = self.getVarForCalculations('wp2rtp', dataset)
if dataset_override is not None:
dataset = dataset_override
else:
dataset = self.clubb_datasets['zm']
wp2, indep, dataset = self.getVarForCalculations('wp2', dataset)
rtp2, indep, dataset = self.getVarForCalculations('rtp2', dataset)
nondim_wp2rtp = wp2rtp / ( wp2 * np.sqrt( rtp2 ) )
return nondim_wp2rtp, indep
def get_nondim_wp2rtp_sam(self, dataset_override=None):
"""
:param dataset_override: If passed, this netcdf dataset will be used to gather the data needed to calculate the
given variable. If not passed, this function should attempt to find the best source for the data, e.g.
the benchmark data for the given model.
:return: tuple of numeric lists of the form (dependent_data, independent_data) for the given variable being calculated.
Lists will be filled with NaN's if the variable could not be calculated.
"""
dataset = self.sam_benchmark_dataset
if dataset_override is not None:
dataset = dataset_override
wp2, indep, dataset = self.getVarForCalculations(['WP2', 'W2', 'wp2'], dataset)
QT2, indep, dataset = self.getVarForCalculations(['QT2'], dataset)
RTP2_SGS, indep, dataset = self.getVarForCalculations(['RTP2_SGS'], dataset)
rtp2 = (QT2 / 1e+6) + RTP2_SGS
wp2rtp, indep, dataset = self.getVarForCalculations(['WP2RTP'], dataset)
nondim_wp2rtp = wp2rtp / ( wp2 * np.sqrt( rtp2 ) )
return nondim_wp2rtp, indep
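All of the getters above share one normalization pattern: divide a covariance by the square root of the product of the matching variances (the third-order moments use the analogous wp2*sqrt(xp2)-style factors). A self-contained numpy sketch of the correlation case on toy profiles; the values are illustrative only, since the real inputs come from getVarForCalculations:

import numpy as np

# Toy vertical profiles standing in for model output.
wp2 = np.array([0.4, 0.6, 0.5])        # variance of vertical velocity w
thlp2 = np.array([0.09, 0.16, 0.25])   # variance of liquid water potential temperature
wpthlp = np.array([0.10, 0.20, 0.15])  # covariance of w and theta_l

# wpthlp / sqrt(wp2 * thlp2) is a correlation, bounded in [-1, 1] wherever both variances are positive.
wpthlp_corr = wpthlp / np.sqrt(wp2 * thlp2)
print(wpthlp_corr)   # -> approximately [0.527, 0.645, 0.424]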
[quality signals] 44.157895 | 133 | 0.595881 | 3,459 | 30,204 | 5.07719 | 0.043654 | 0.060756 | 0.131192 | 0.134837 | 0.89688 | 0.846771 | 0.833447 | 0.794272 | 0.777816 | 0.777702 | 0 | 0.016837 | 0.311747 | 30,204 | 683 | 134 | 44.222548 | 0.827978 | 0.306648 | 0 | 0.63747 | 0 | 0 | 0.09518 | 0.005974 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051095 | false | 0 | 0.009732 | 0 | 0.111922 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
[record] a25dd159a122d0bd52fb9c0dbd27643c2721f9b9 | 180 | py | Python | test/test_nabla_version.py | AlbanAndrieu/nabla-hooks | 44ffbb834ebcce59b9e1c23f3789a00cbb12ac6f | ["Apache-2.0"] | null | null | null | test/test_nabla_version.py | AlbanAndrieu/nabla-hooks | 44ffbb834ebcce59b9e1c23f3789a00cbb12ac6f | ["Apache-2.0"] | 5 | 2021-04-07T20:43:26.000Z | 2022-03-01T08:42:41.000Z | test/test_nabla_version.py | AlbanAndrieu/nabla-hooks | 44ffbb834ebcce59b9e1c23f3789a00cbb12ac6f | ["Apache-2.0"] | null | null | null
# -*- coding: utf-8 -*-
import re

from hooks import __version__


def test_version():
    # assert __version__ == '1.0.2'
    assert re.match(r'^v1.0.2.+$', __version__)  # nosec
[quality signals] 18 | 56 | 0.627778 | 26 | 180 | 3.846154 | 0.692308 | 0.04 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.048276 | 0.194444 | 180 | 9 | 57 | 20 | 0.641379 | 0.316667 | 0 | 0 | 0 | 0 | 0.084034 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.25 | true | 0 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
[record] a270d21d3dc61380b695da03858471846f869cd7 | 29 | py | Python | src/__init__.py | goodarzilab/ciberatac | 58c150813cfdf1cea160b9b2c464c382cb0f7395 | ["BSD-3-Clause"] | 3 | 2022-02-25T19:24:52.000Z | 2022-03-22T16:48:07.000Z | src/__init__.py | goodarzilab/ciberatac | 58c150813cfdf1cea160b9b2c464c382cb0f7395 | ["BSD-3-Clause"] | null | null | null | src/__init__.py | goodarzilab/ciberatac | 58c150813cfdf1cea160b9b2c464c382cb0f7395 | ["BSD-3-Clause"] | null | null | null
import ciberatac
import mave
[quality signals] 9.666667 | 16 | 0.862069 | 4 | 29 | 6.25 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 29 | 2 | 17 | 14.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
[record] a296e9f4f9437379d0bce6ee891c6e014eddae96 | 46,008 | py | Python | Part2_Probabilistic_Models/w2_unittest.py | picsag/NLP | 7fe8ec5cf9636fbbe1d5dd077455f4db62800ec9 | ["MIT"] | null | null | null | Part2_Probabilistic_Models/w2_unittest.py | picsag/NLP | 7fe8ec5cf9636fbbe1d5dd077455f4db62800ec9 | ["MIT"] | null | null | null | Part2_Probabilistic_Models/w2_unittest.py | picsag/NLP | 7fe8ec5cf9636fbbe1d5dd077455f4db62800ec9 | ["MIT"] | null | null | null
from utils_pos import get_word_tag, preprocess
import pandas as pd
from collections import defaultdict
import math
import numpy as np
import pickle
def test_create_dictionaries(target, training_corpus, vocab):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_case",
"input": {
"training_corpus": training_corpus,
"vocab": vocab,
"verbose": False,
},
"expected": {
"len_emission_counts": 31140,
"len_transition_counts": 1421,
"len_tag_counts": 46,
"emission_counts": {
("DT", "the"): 41098,
("NNP", "--unk_upper--"): 4635,
("NNS", "Arts"): 2,
},
"transition_counts": {
("VBN", "TO"): 2142,
("CC", "IN"): 1227,
("VBN", "JJR"): 66,
},
"tag_counts": {"PRP": 17436, "UH": 97, ")": 1376,},
},
},
{
"name": "small_case",
"input": {
"training_corpus": training_corpus[:1000],
"vocab": vocab,
"verbose": False,
},
"expected": {
"len_emission_counts": 442,
"len_transition_counts": 272,
"len_tag_counts": 38,
"emission_counts": {
("DT", "the"): 48,
("NNP", "--unk_upper--"): 9,
("NNS", "Arts"): 1,
},
"transition_counts": {
("VBN", "TO"): 3,
("CC", "IN"): 2,
("VBN", "JJR"): 1,
},
"tag_counts": {"PRP": 11, "UH": 0, ")": 2,},
},
},
]
for test_case in test_cases:
result_emission, result_transition, result_tag = target(**test_case["input"])
# emission dictionary
try:
assert isinstance(result_emission, defaultdict)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": defaultdict,
"got": type(result_emission),
}
)
print(
f"Wrong output type for emission_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
assert len(result_emission) == test_case["expected"]["len_emission_counts"]
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["len_emission_counts"],
"got": len(result_emission),
}
)
print(
f"Wrong output length for emission_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
for k, v in test_case["expected"]["emission_counts"].items():
assert np.isclose(result_emission[k], v)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["emission_counts"],
"got": result_emission,
}
)
print(
f"Wrong output values for emission_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')}."
)
# transition dictionary
try:
assert isinstance(result_transition, defaultdict)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": defaultdict,
"got": type(result_transition),
}
)
print(
f"Wrong output type for transition_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
assert (
len(result_transition) == test_case["expected"]["len_transition_counts"]
)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["len_transition_counts"],
"got": len(result_transition),
}
)
print(
f"Wrong output length for transition_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
for k, v in test_case["expected"]["transition_counts"].items():
assert np.isclose(result_transition[k], v)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["transition_counts"],
"got": result_transition,
}
)
print(
f"Wrong output values for transition_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')}."
)
# tags count
try:
assert isinstance(result_tag, defaultdict)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": defaultdict,
"got": type(result_transition),
}
)
print(
f"Wrong output type for tag_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
assert len(result_tag) == test_case["expected"]["len_tag_counts"]
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["len_tag_counts"],
"got": len(result_tag),
}
)
print(
f"Wrong output length for tag_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
for k, v in test_case["expected"]["tag_counts"].items():
assert np.isclose(result_tag[k], v)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["tag_counts"],
"got": result_tag,
}
)
print(
f"Wrong output values for tag_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_predict_pos(target, prep, y, emission_counts, vocab, states):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {
"prep": prep,
"y": y,
"emission_counts": emission_counts,
"vocab": vocab,
"states": states,
},
"expected": 0.8888563993099213,
},
{
"name": "small_check",
"input": {
"prep": prep[:1000],
"y": y[:1000],
"emission_counts": emission_counts,
"vocab": vocab,
"states": states,
},
"expected": 0.876,
},
]
for test_case in test_cases:
result = target(**test_case["input"])
try:
assert np.isclose(result, test_case["expected"])
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"],
"got": result,
}
)
print(
f"Wrong output values for tag_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_create_transition_matrix(target, tag_counts, transition_counts):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {
"alpha": 0.001,
"tag_counts": tag_counts,
"transition_counts": transition_counts,
},
"expected": {
"0:5": np.array(
[
[
7.03997297e-06,
7.03997297e-06,
7.03997297e-06,
7.03997297e-06,
7.03997297e-06,
],
[
1.35647553e-07,
1.35647553e-07,
1.35647553e-07,
1.35647553e-07,
1.35647553e-07,
],
[
1.44528595e-07,
1.44673124e-04,
6.93751711e-03,
6.79298851e-03,
5.05864537e-03,
],
[
7.32039770e-07,
1.69101919e-01,
7.32039770e-07,
7.32039770e-07,
7.32039770e-07,
],
[
7.26719892e-07,
7.27446612e-04,
7.26719892e-07,
7.27446612e-04,
7.26719892e-07,
],
]
),
"30:35": np.array(
[
[
2.21706877e-06,
2.21706877e-06,
2.21706877e-06,
8.87049214e-03,
2.21706877e-06,
],
[
3.75650909e-07,
7.51677469e-04,
3.75650909e-07,
5.10888993e-02,
3.75650909e-07,
],
[
1.72277159e-05,
1.72277159e-05,
1.72277159e-05,
1.72277159e-05,
1.72277159e-05,
],
[
4.47733569e-05,
4.47286283e-08,
4.47286283e-08,
8.95019852e-05,
4.47733569e-05,
],
[
1.03043917e-05,
1.03043917e-05,
1.03043917e-05,
6.18366548e-02,
3.09234796e-02,
],
]
),
},
},
{
"name": "alpha_check",
"input": {
"alpha": 0.05,
"tag_counts": tag_counts,
"transition_counts": transition_counts,
},
"expected": {
"0:5": np.array(
[
[
3.46500347e-04,
3.46500347e-04,
3.46500347e-04,
3.46500347e-04,
3.46500347e-04,
],
[
6.78030457e-06,
6.78030457e-06,
6.78030457e-06,
6.78030457e-06,
6.78030457e-06,
],
[
7.22407640e-06,
1.51705604e-04,
6.94233742e-03,
6.79785589e-03,
5.06407756e-03,
],
[
3.65416941e-05,
1.68859168e-01,
3.65416941e-05,
3.65416941e-05,
3.65416941e-05,
],
[
3.62765726e-05,
7.61808024e-04,
3.62765726e-05,
7.61808024e-04,
3.62765726e-05,
],
]
),
"30:35": np.array(
[
[
1.10302228e-04,
1.10302228e-04,
1.10302228e-04,
8.93448048e-03,
1.10302228e-04,
],
[
1.87666554e-05,
7.69432872e-04,
1.87666554e-05,
5.10640694e-02,
1.87666554e-05,
],
[
8.29187396e-04,
8.29187396e-04,
8.29187396e-04,
8.29187396e-04,
8.29187396e-04,
],
[
4.69603252e-05,
2.23620596e-06,
2.23620596e-06,
9.16844445e-05,
4.69603252e-05,
],
[
5.03524673e-04,
5.03524673e-04,
5.03524673e-04,
6.09264854e-02,
3.07150050e-02,
],
]
),
},
},
]
for test_case in test_cases:
result = target(**test_case["input"])
try:
assert isinstance(result, np.ndarray)
successful_cases += 1
except:
failed_cases.append(
{"name": test_case["name"], "expected": np.ndarray, "got": type(result),}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(result[0:5, 0:5], test_case["expected"]["0:5"])
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["0:5"],
"got": result[0:5, 0:5],
}
)
print(
f"Wrong output values in rows and columns with indexes between 0 and 5.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(result[30:35, 30:35], test_case["expected"]["30:35"])
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["30:35"],
"got": result[30:35, 30:35],
}
)
print(
f"Wrong output values in rows and columns with indexes between 30 and 35.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_create_emission_matrix(target, tag_counts, emission_counts, vocab):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {
"alpha": 0.001,
"tag_counts": tag_counts,
"emission_counts": emission_counts,
"vocab": vocab,
},
"expected": {
"0:5": np.array(
[
[
6.03219988e-06,
6.03219988e-06,
8.56578416e-01,
6.03219988e-06,
6.03219988e-06,
],
[
1.35212298e-07,
1.35212298e-07,
1.35212298e-07,
9.71365280e-01,
1.35212298e-07,
],
[
1.44034584e-07,
1.44034584e-07,
1.44034584e-07,
1.44034584e-07,
1.44034584e-07,
],
[
7.19539897e-07,
7.19539897e-07,
7.19539897e-07,
7.19539897e-07,
7.19539897e-07,
],
[
7.14399508e-07,
7.14399508e-07,
7.14399508e-07,
7.14399508e-07,
7.14399508e-07,
],
]
),
"30:35": np.array(
[
[
2.10625199e-06,
2.10625199e-06,
2.10625199e-06,
2.10625199e-06,
2.10625199e-06,
],
[
3.72331731e-07,
3.72331731e-07,
3.72331731e-07,
3.72331731e-07,
3.72331731e-07,
],
[
1.22283772e-05,
1.22406055e-02,
1.22283772e-05,
1.22283772e-05,
1.22283772e-05,
],
[
4.46812012e-08,
4.46812012e-08,
4.46812012e-08,
4.46812012e-08,
4.46812012e-08,
],
[
8.27972213e-06,
4.96866125e-02,
8.27972213e-06,
8.27972213e-06,
8.27972213e-06,
],
]
),
},
},
{
"name": "alpha_check",
"input": {
"alpha": 0.05,
"tag_counts": tag_counts,
"emission_counts": emission_counts,
"vocab": vocab,
},
"expected": {
"0:5": np.array(
[
[
3.75699741e-05,
3.75699741e-05,
1.06736296e-01,
3.75699741e-05,
3.75699741e-05,
],
[
5.84054154e-06,
5.84054154e-06,
5.84054154e-06,
8.39174848e-01,
5.84054154e-06,
],
[
6.16686298e-06,
6.16686298e-06,
6.16686298e-06,
6.16686298e-06,
6.16686298e-06,
],
[
1.95706206e-05,
1.95706206e-05,
1.95706206e-05,
1.95706206e-05,
1.95706206e-05,
],
[
1.94943174e-05,
1.94943174e-05,
1.94943174e-05,
1.94943174e-05,
1.94943174e-05,
],
]
),
"30:35": np.array(
[
[
3.04905937e-05,
3.04905937e-05,
3.04905937e-05,
3.04905937e-05,
3.04905937e-05,
],
[
1.29841464e-05,
1.29841464e-05,
1.29841464e-05,
1.29841464e-05,
1.29841464e-05,
],
[
4.01010547e-05,
8.42122148e-04,
4.01010547e-05,
4.01010547e-05,
4.01010547e-05,
],
[
2.12351646e-06,
2.12351646e-06,
2.12351646e-06,
2.12351646e-06,
2.12351646e-06,
],
[
3.88847844e-05,
4.70505891e-03,
3.88847844e-05,
3.88847844e-05,
3.88847844e-05,
],
]
),
},
},
]
for test_case in test_cases:
result = target(**test_case["input"])
try:
assert isinstance(result, np.ndarray)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": np.ndarray,
"got": type(result),
}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(result[0:5, 0:5], test_case["expected"]["0:5"])
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["0:5"],
"got": result[0:5, 0:5],
}
)
print(
f"Wrong output values in rows and columns with indexes between 0 and 5.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(result[30:35, 30:35], test_case["expected"]["30:35"])
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["30:35"],
"got": result[30:35, 30:35],
}
)
print(
f"Wrong output values in rows and columns with indexes between 30 and 35.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_initialize(target, states, tag_counts, A, B, corpus, vocab):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {
"states": states,
"tag_counts": tag_counts,
"A": A,
"B": B,
"corpus": corpus,
"vocab": vocab,
},
"expected": {
"best_probs_shape": (46, 34199),
"best_paths_shape": (46, 34199),
"best_probs_col0": np.array(
[
-22.60982633,
-23.07660654,
-23.57298822,
-19.76726066,
-24.74325104,
-35.20241402,
-35.00096024,
-34.99203854,
-21.35069072,
-19.85767814,
-21.92098414,
-4.01623741,
-19.16380593,
-21.1062242,
-20.47163973,
-21.10157273,
-21.49584851,
-20.4811853,
-18.25856307,
-23.39717471,
-21.92146798,
-9.41377777,
-21.03053445,
-21.08029591,
-20.10863677,
-33.48185979,
-19.47301382,
-20.77150242,
-20.11727696,
-20.56031676,
-20.57193964,
-32.30366295,
-18.07551522,
-22.58887909,
-19.1585905,
-16.02994331,
-24.30968545,
-20.92932218,
-21.96797222,
-24.29571895,
-23.45968569,
-22.43665883,
-20.46568904,
-22.75551606,
-19.6637215,
-18.36288463,
]
),
},
}
]
for test_case in test_cases:
result_best_probs, result_best_paths = target(**test_case["input"])
try:
assert isinstance(result_best_probs, np.ndarray)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]) + "index 0",
"expected": np.ndarray,
"got": type(result_best_probs),
}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert isinstance(result_best_paths, np.ndarray)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]) + "index 1",
"expected": np.ndarray,
"got": type(result_best_paths),
}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert result_best_probs.shape == test_case["expected"]["best_probs_shape"]
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["best_probs_shape"],
"got": result_best_probs.shape,
}
)
print(
f"Wrong output shape for best_probs.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert result_best_paths.shape == test_case["expected"]["best_paths_shape"]
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["best_paths_shape"],
"got": result_best_paths.shape,
}
)
print(
f"Wrong output shape for best_paths.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(
result_best_probs[:, 0], test_case["expected"]["best_probs_col0"]
)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["best_probs_col0"],
"got": result_best_probs[:, 0],
}
)
print(
f"Wrong non-zero values for best_probs.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.all((result_best_paths == 0))
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": "Array of zeros with shape (46, 34199)",
}
)
print(
f"Wrong values for best_paths.\n\t Expected: {failed_cases[-1].get('expected')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_viterbi_forward(target, A, B, test_corpus, vocab):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {
"A": A,
"B": B,
"test_corpus": test_corpus,
"best_probs": pickle.load(
open("./support_files/best_probs_initilized.pkl", "rb")
),
"best_paths": pickle.load(
open("./support_files/best_paths_initilized.pkl", "rb")
),
"vocab": vocab,
"verbose": False,
},
"expected": {
"best_probs0:5": np.array(
[
[
-22.60982633,
-24.78215633,
-34.08246498,
-34.34107105,
-49.56012613,
],
[
-23.07660654,
-24.51583896,
-35.04774303,
-35.28281026,
-50.52540418,
],
[
-23.57298822,
-29.98305064,
-31.98004656,
-38.99187549,
-47.45770771,
],
[
-19.76726066,
-25.7122143,
-31.54577612,
-37.38331695,
-47.02343727,
],
[
-24.74325104,
-28.78696025,
-31.458494,
-36.00456711,
-46.93615515,
],
]
),
"best_probs30:35": np.array(
[
[
-202.75618827,
-208.38838519,
-210.46938402,
-210.15943098,
-223.79223672,
],
[
-202.58297597,
-217.72266765,
-207.23725672,
-215.529735,
-224.13957203,
],
[
-202.00878092,
-214.23093833,
-217.41021623,
-220.73768708,
-222.03338753,
],
[
-200.44016117,
-209.46937757,
-209.06951664,
-216.22297765,
-221.09669653,
],
[
-208.74189499,
-214.62088817,
-209.79346523,
-213.52623459,
-228.70417526,
],
]
),
"best_paths0:5": np.array(
[
[0, 11, 20, 25, 20],
[0, 11, 20, 25, 20],
[0, 11, 20, 25, 20],
[0, 11, 20, 25, 20],
[0, 11, 20, 25, 20],
]
),
"best_paths30:35": np.array(
[
[20, 19, 35, 11, 21],
[20, 19, 35, 11, 21],
[20, 19, 35, 11, 21],
[20, 19, 35, 11, 21],
[35, 19, 35, 11, 34],
]
),
},
}
]
for test_case in test_cases:
result_best_probs, result_best_paths = target(**test_case["input"])
try:
assert isinstance(result_best_probs, np.ndarray)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]) + "index 0",
"expected": np.ndarray,
"got": type(result_best_probs),
}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert isinstance(result_best_paths, np.ndarray)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]) + "index 1",
"expected": np.ndarray,
"got": type(result_best_paths),
}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(
result_best_probs[0:5, 0:5], test_case["expected"]["best_probs0:5"]
)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["best_probs0:5"],
"got": result_best_probs[0:5, 0:5],
}
)
print(
f"Wrong values for best_probs.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(
result_best_probs[30:35, 30:35],
test_case["expected"]["best_probs30:35"],
)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["best_probs30:35"],
"got": result_best_probs[:, 0],
}
)
print(
f"Wrong values for best_probs.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(
result_best_paths[0:5, 0:5], test_case["expected"]["best_paths0:5"],
)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["best_paths0:5"],
"got": result_best_paths[0:5, 0:5],
}
)
print(
f"Wrong values for best_paths.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(
result_best_paths[30:35, 30:35],
test_case["expected"]["best_paths30:35"],
)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["best_paths30:35"],
"got": result_best_paths[30:35, 30:35],
}
)
print(
f"Wrong values for best_paths.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_viterbi_backward(target, corpus, states):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {
"corpus": corpus,
"best_probs": pickle.load(
open("./support_files/best_probs_trained.pkl", "rb")
),
"best_paths": pickle.load(
open("./support_files/best_paths_trained.pkl", "rb")
),
"states": states,
},
"expected": {
"pred_len": 34199,
"pred_head": [
"DT",
"NN",
"POS",
"NN",
"MD",
"VB",
"VBN",
"IN",
"JJ",
"NN",
],
"pred_tail": [
"PRP",
"MD",
"RB",
"VB",
"PRP",
"RB",
"IN",
"PRP",
".",
"--s--",
],
},
}
]
for test_case in test_cases:
result = target(**test_case["input"])
try:
assert isinstance(result, list)
successful_cases += 1
except:
failed_cases.append(
{"name": str(test_case["name"]), "expected": list, "got": type(result)}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert len(result) == test_case["expected"]["pred_len"]
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["pred_len"],
"got": len(result),
}
)
print(
f"Wrong output lenght.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert result[:10] == test_case["expected"]["pred_head"]
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["pred_head"],
"got": result[:10],
}
)
print(
f"Wrong values for pred list.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert result[-10:] == test_case["expected"]["pred_tail"]
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["pred_tail"],
"got": result[-10:],
}
)
print(
f"Wrong values for pred list.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_compute_accuracy(target, pred, y):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {"pred": pred, "y": y},
"expected": 0.953063647155511,
},
{
"name": "small_check",
"input": {"pred": pred[:100], "y": y[:100]},
"expected": 0.979381443298969,
},
]
for test_case in test_cases:
result = target(**test_case["input"])
try:
assert isinstance(result, float)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": float,
"got": type(result),
}
)
print(
f"Wrong output type.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.isclose(result, test_case["expected"])
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": float,
"got": type(result),
}
)
print(
f"Wrong output type.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
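Every grader in this file inlines the same pattern: try an assertion, count a success, otherwise append a failure record and print a formatted message. A condensed sketch of that pattern as a reusable helper (a hypothetical refactoring, not part of the original file):

def check(condition, name, expected, got, message, failed_cases):
    # Returns 1 on success; on failure, records the case and prints the message.
    try:
        assert condition
        return 1
    except AssertionError:
        failed_cases.append({"name": name, "expected": expected, "got": got})
        print(f"{message}\n\t Expected: {expected}.\n\t Got: {got}.")
        return 0

# Usage inside a grader:
#     successful_cases += check(isinstance(result, float), test_case["name"],
#                               float, type(result), "Wrong output type.", failed_cases)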
[quality signals] 34.723019 | 180 | 0.364937 | 3,686 | 46,008 | 4.403147 | 0.10662 | 0.093531 | 0.04732 | 0.05915 | 0.803697 | 0.787307 | 0.7191 | 0.707332 | 0.688848 | 0.6809 | 0 | 0.176473 | 0.522366 | 46,008 | 1,324 | 181 | 34.749245 | 0.562093 | 0.011215 | 0 | 0.564165 | 0 | 0.026634 | 0.168646 | 0.053254 | 0 | 0 | 0 | 0 | 0.027441 | 1 | 0.006457 | false | 0.012914 | 0.004843 | 0 | 0.011299 | 0.046812 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
[record] a2d9fdd419fa036f52175d9e3415841d8f5f5c1f | 4,055 | py | Python | tests/features/meta/tests/test_aggregators.py | kevinbazira/revscoring | 625f8b8048eb3c0c1c872ed9c15687c56f125747 | ["MIT"] | 49 | 2015-07-15T14:53:06.000Z | 2018-08-20T15:00:31.000Z | tests/features/meta/tests/test_aggregators.py | kevinbazira/revscoring | 625f8b8048eb3c0c1c872ed9c15687c56f125747 | ["MIT"] | 224 | 2015-06-14T23:22:43.000Z | 2018-08-08T22:52:46.000Z | tests/features/meta/tests/test_aggregators.py | kevinbazira/revscoring | 625f8b8048eb3c0c1c872ed9c15687c56f125747 | ["MIT"] | 36 | 2015-07-03T03:25:01.000Z | 2018-05-25T10:21:08.000Z
import pickle

from revscoring.datasources import Datasource
from revscoring.dependencies import solve
from revscoring.features.meta import aggregators


def test_sum():
    my_list = Datasource("my_list")
    my_sum = aggregators.sum(my_list)
    cache = {my_list: [1, 2, 3, 4]}
    assert solve(my_sum, cache=cache) == 10
    cache = {my_list: []}
    assert solve(my_sum, cache=cache) == 0
    cache = {my_list: None}
    assert solve(my_sum, cache=cache) == 0
    assert str(my_sum) == "feature.sum(<datasource.my_list>)"
    assert pickle.loads(pickle.dumps(my_sum)) == my_sum


def test_sum_vectors():
    my_list = Datasource("my_list")
    my_sum = aggregators.sum(my_list, vector=True)
    cache = {my_list: [[1, 2, 3], [4, 5, 6]]}
    assert all(a == b for a, b in
               zip(solve(my_sum, cache=cache), [5, 7, 9]))
    cache = {my_list: [[]]}
    assert solve(my_sum, cache=cache) == [0]
    cache = {my_list: [None]}
    assert solve(my_sum, cache=cache) == [0]
    assert str(my_sum) == "feature_vector.sum(<datasource.my_list>)"
    assert pickle.loads(pickle.dumps(my_sum)) == my_sum


def test_min():
    my_list = Datasource("my_list")
    my_min = aggregators.min(my_list)
    cache = {my_list: [1, 2, 3, 4]}
    assert solve(my_min, cache=cache) == 1
    cache = {my_list: []}
    assert solve(my_min, cache=cache) == 0
    cache = {my_list: None}
    assert solve(my_min, cache=cache) == 0
    assert pickle.loads(pickle.dumps(my_min)) == my_min


def test_min_vectors():
    my_list = Datasource("my_list")
    my_min = aggregators.min(my_list, vector=True)
    cache = {my_list: [[1, 2, 3], [4, 5, 6]]}
    assert all(a == b for a, b in
               zip(solve(my_min, cache=cache), [1, 2, 3]))
    cache = {my_list: [[]]}
    assert solve(my_min, cache=cache) == [0]
    cache = {my_list: [None]}
    assert solve(my_min, cache=cache) == [0]
    assert pickle.loads(pickle.dumps(my_min)) == my_min


def test_max():
    my_list = Datasource("my_list")
    my_max = aggregators.max(my_list)
    cache = {my_list: [1, 2, 3, 4]}
    assert solve(my_max, cache=cache) == 4
    cache = {my_list: []}
    assert solve(my_max, cache=cache) == 0
    cache = {my_list: None}
    assert solve(my_max, cache=cache) == 0
    assert pickle.loads(pickle.dumps(my_max)) == my_max


def test_max_vectors():
    my_list = Datasource("my_list")
    my_max = aggregators.max(my_list, vector=True)
    cache = {my_list: [[1, 2, 3], [4, 5, 6]]}
    assert all(a == b for a, b in
               zip(solve(my_max, cache=cache), [4, 5, 6]))
    cache = {my_list: [[]]}
    assert solve(my_max, cache=cache) == [0]
    cache = {my_list: [None]}
    assert solve(my_max, cache=cache) == [0]
    assert pickle.loads(pickle.dumps(my_max)) == my_max


def test_len():
    my_list = Datasource("my_list")
    my_len = aggregators.len(my_list)
    cache = {my_list: [1, 2, 3, 4]}
    assert solve(my_len, cache=cache) == 4
    cache = {my_list: []}
    assert solve(my_len, cache=cache) == 0
    cache = {my_list: None}
    assert solve(my_len, cache=cache) == 0
    assert pickle.loads(pickle.dumps(my_len)) == my_len


def test_len_vectors():
    my_list = Datasource("my_list")
    my_len = aggregators.len(my_list, vector=True)
    cache = {my_list: [[1, 2, 3], [4, 5, 6]]}
    assert all(a == b for a, b in
               zip(solve(my_len, cache=cache), [2, 2, 2]))
    cache = {my_list: [[]]}
    assert solve(my_len, cache=cache) == [0]
    cache = {my_list: [None]}
    assert solve(my_len, cache=cache) == [0]
    assert pickle.loads(pickle.dumps(my_len)) == my_len


def test_mean_vectors():
    my_list = Datasource("my_list")
    my_mean = aggregators.mean(my_list, vector=True)
    cache = {my_list: [[1, 2, 3], [4, 5, 6]]}
    assert all(a == b for a, b in
               zip(solve(my_mean, cache=cache), [2.5, 3.5, 4.5]))
    cache = {my_list: [[]]}
    assert solve(my_mean, cache=cache) == [0]
    cache = {my_list: [None]}
    assert solve(my_mean, cache=cache) == [0]
    assert pickle.loads(pickle.dumps(my_mean)) == my_mean
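The tests above double as documentation for the aggregator API; a small usage sketch built only from the calls they exercise (Datasource, solve, and the aggregators), with a hypothetical datasource name:

from revscoring.datasources import Datasource
from revscoring.dependencies import solve
from revscoring.features.meta import aggregators

# Hypothetical datasource: token lengths observed in one revision.
token_lengths = Datasource("token_lengths")
total = aggregators.sum(token_lengths)
longest = aggregators.max(token_lengths)

cache = {token_lengths: [3, 7, 2]}
print(solve(total, cache=cache))    # 12
print(solve(longest, cache=cache))  # 7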
| 31.192308
| 68
| 0.614303
| 637
| 4,055
| 3.711146
| 0.072214
| 0.142132
| 0.125635
| 0.068528
| 0.906514
| 0.889594
| 0.859137
| 0.825296
| 0.825296
| 0.798646
| 0
| 0.027488
| 0.219482
| 4,055
| 129
| 69
| 31.434109
| 0.719431
| 0
| 0
| 0.663366
| 0
| 0
| 0.033539
| 0.018002
| 0
| 0
| 0
| 0
| 0.376238
| 1
| 0.089109
| false
| 0
| 0.039604
| 0
| 0.128713
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2f587a4e8ce39c071b8b287c0d083d8bb6c6391
| 36,729
|
py
|
Python
|
pyke/krb_compiler/krbparser_tables.py
|
jhidding/pyke
|
0fbf34612a7648b5b096de4a6749325e205dfc2a
|
[
"MIT"
] | null | null | null |
pyke/krb_compiler/krbparser_tables.py
|
jhidding/pyke
|
0fbf34612a7648b5b096de4a6749325e205dfc2a
|
[
"MIT"
] | null | null | null |
pyke/krb_compiler/krbparser_tables.py
|
jhidding/pyke
|
0fbf34612a7648b5b096de4a6749325e205dfc2a
|
[
"MIT"
] | null | null | null |
# /home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser_tables.py
# This file is automatically generated. Do not edit.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = '`\xa3O\x17\xd6C\xd4E2\xb5\xf60wIM\xd9'
_lr_action_items = {'TAKING_TOK':([142,161,187,216,],[-66,188,-66,240,]),'LP_TOK':([18,32,43,65,73,85,88,95,111,112,124,125,128,132,144,150,158,162,165,170,171,179,180,181,182,183,184,185,189,192,193,196,197,199,204,205,206,207,210,212,214,218,219,220,224,225,226,231,233,234,235,236,238,239,244,245,251,253,254,255,257,258,260,266,268,269,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[32,43,65,65,43,65,43,111,43,43,-94,43,-79,43,-111,-32,43,43,192,43,43,-91,-51,-76,-50,-11,212,43,43,43,-38,43,43,226,231,43,-51,-50,43,43,-57,-33,-37,-36,-31,-21,43,43,-59,43,-108,255,43,43,-40,-34,266,43,-11,43,-11,-11,-30,43,-11,-60,-52,-25,-56,-16,-39,43,-53,-58,-61,43,-54,-63,-35,-55,43,-11,-65,-62,-64,]),'FOREACH_TOK':([61,],[79,]),'AS_TOK':([256,267,285,294,],[271,271,271,271,]),'ANONYMOUS_VAR_TOK':([32,43,65,66,73,85,88,100,106,111,112,124,125,128,132,144,150,158,162,170,171,179,181,183,185,189,192,193,196,197,205,210,212,214,218,219,220,224,225,226,231,233,234,235,238,239,244,245,253,254,255,257,258,260,266,268,269,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[46,46,46,46,46,46,46,46,46,46,46,-94,46,-79,46,-111,-32,46,46,46,46,-91,-76,-11,46,46,46,-38,46,46,46,46,46,-57,-33,-37,-36,-31,-21,46,46,-59,46,-108,46,46,-40,-34,46,-11,46,-11,-11,-30,46,-11,-60,-52,-25,-56,-16,-39,46,-53,-58,-61,46,-54,-63,-35,-55,46,-11,-65,-62,-64,]),'NUMBER_TOK':([32,43,65,73,85,88,111,112,124,125,128,132,144,150,158,162,170,171,179,181,183,185,189,192,193,196,197,205,210,212,214,218,219,220,224,225,226,231,233,234,235,238,239,244,245,253,254,255,257,258,260,266,268,269,272,273,274,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[44,44,44,44,44,44,44,44,-94,44,-79,44,-111,-32,44,44,44,44,-91,-76,-11,44,44,44,-38,44,44,44,44,44,-57,-33,-37,-36,-31,-21,44,44,-59,44,-108,44,44,-40,-34,44,-11,44,-11,-11,-30,44,-11,-60,-52,-25,288,-56,-16,-39,44,-53,-58,-61,44,-54,-63,-35,-55,44,-11,-65,-62,-64,]),'DEINDENT_TOK':([94,107,109,114,119,121,124,125,128,144,150,153,154,159,160,172,173,179,181,183,189,193,196,197,209,214,218,219,220,224,225,229,233,235,241,244,245,250,254,257,258,260,261,265,268,269,272,273,275,276,277,278,280,281,283,284,289,291,292,293,295,296,300,301,305,307,308,310,311,312,],[-104,118,-106,135,138,140,-94,143,-79,-111,-32,-90,173,-49,-48,-107,198,-91,-76,209,218,-38,224,225,-9,-57,-33,-37,-36,-31,-21,250,-59,-108,-85,-40,-34,-15,269,275,276,-30,278,-43,284,-60,-52,-25,-56,-16,291,-39,-41,293,-53,-58,-61,-86,300,-42,-54,-63,-35,-55,308,310,-65,-62,312,-64,]),'STEP_TOK':([256,267,285,294,],[274,274,274,274,]),'EXTENDING_TOK':([0,3,6,],[-22,-23,9,]),'ASSERT_TOK':([61,80,143,],[-101,97,-29,]),'INDENT_TOK':([33,37,39,58,59,60,62,77,96,113,122,139,141,145,151,152,157,160,168,194,200,208,213,215,227,232,262,273,287,298,299,303,],[-69,61,-69,75,76,-69,81,93,112,134,-10,-68,158,162,170,171,176,187,-70,222,-70,234,238,239,248,253,279,-67,297,-67,304,306,]),'.':([7,129,155,180,182,184,204,206,207,],[10,147,174,-51,-50,211,230,-51,-50,]),'!':([158,179,181,183,185,193,205,210,214,219,220,233,234,235,238,239,244,253,254,257,258,268,269,272,273,275,276,278,283,284,289,295,296,301,304,307,308,310,312,],[177,-91,-76,-11,177,-38,177,177,-57,-37,-36,-59,177,-108,177,177,-40,177,-11,-11,-11,-11,-60,-52,-25,-56,-16,-39,-53,-58,-61,-54,-63,-55,177,-11,-65,-62,-64,]),'IN_TOK':([44,46,48,49,50,52,53,54,55,56,67,99,103,104,127,129,136,137,146,180,182,],[-78,-75,-74,-89,-80,-88,-81,-116,-20,-84,-100,-117,-121,-120,-66,-87,-118,-119,163,-74,-87,]),'NOTANY_
TOK':([112,124,125,128,132,144,150,158,162,170,171,179,181,183,185,189,193,196,197,205,210,214,218,219,220,224,225,233,234,235,238,239,244,245,253,254,257,258,260,268,269,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[126,-94,126,-79,126,-111,-32,178,126,126,126,-91,-76,-11,178,126,-38,126,126,178,178,-57,-33,-37,-36,-31,-21,-59,178,-108,178,178,-40,-34,178,-11,-11,-11,-30,-11,-60,-52,-25,-56,-16,-39,126,-53,-58,-61,126,-54,-63,-35,-55,178,-11,-65,-62,-64,]),'WITHOUT_TOK':([17,],[30,]),'*':([43,65,85,88,],[66,66,100,106,]),',':([40,41,44,45,46,47,48,49,50,52,53,54,55,56,57,67,68,69,70,71,82,83,90,99,101,102,103,104,105,136,137,],[-98,63,-78,-87,-75,-96,-74,-89,-80,-88,-81,-116,-20,-84,73,-100,85,-97,-93,88,-115,85,-113,-117,-110,88,-121,-120,-114,-118,-119,]),'BC_EXTRAS_TOK':([11,16,21,140,],[19,-92,-109,-45,]),'CODE_TOK':([75,81,93,148,163,164,176,188,195,222,228,240,248,297,306,],[91,91,91,166,166,166,202,166,166,166,166,166,166,202,202,]),'REQUIRE_TOK':([225,276,],[246,290,]),'PATTERN_VAR_TOK':([32,43,65,66,73,85,88,100,106,111,112,124,125,128,132,144,150,158,162,170,171,177,179,181,183,185,189,192,193,196,197,205,210,211,212,214,218,219,220,224,225,226,230,231,233,234,235,238,239,244,245,253,254,255,257,258,260,266,268,269,271,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[48,48,48,48,48,48,48,48,48,48,48,-94,48,-79,48,-111,-32,180,48,48,48,206,-91,-76,-11,180,48,48,-38,48,48,180,180,206,48,-57,-33,-37,-36,-31,-21,48,206,48,-59,180,-108,180,180,-40,-34,180,-11,48,-11,-11,-30,48,-11,-60,286,-52,-25,-56,-16,-39,48,-53,-58,-61,48,-54,-63,-35,-55,180,-11,-65,-62,-64,]),'TRUE_TOK':([32,43,65,73,85,88,111,112,124,125,128,132,144,150,158,162,170,171,179,181,183,185,189,192,193,196,197,205,210,212,214,218,219,220,224,225,226,231,233,234,235,238,239,244,245,253,254,255,257,258,260,266,268,269,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[49,49,49,49,49,49,49,49,-94,49,-79,49,-111,-32,49,49,49,49,-91,-76,-11,49,49,49,-38,49,49,49,49,49,-57,-33,-37,-36,-31,-21,49,49,-59,49,-108,49,49,-40,-34,49,-11,49,-11,-11,-30,49,-11,-60,-52,-25,-56,-16,-39,49,-53,-58,-61,49,-54,-63,-35,-55,49,-11,-65,-62,-64,]),'PLAN_EXTRAS_TOK':([11,16,21,22,118,140,],[-99,-92,-109,36,-12,-45,]),':':([12,20,],[23,23,]),'=':([44,46,48,49,50,52,53,54,55,56,67,99,103,104,127,129,136,137,146,180,182,],[-78,-75,-74,-89,-80,-88,-81,-116,-20,-84,-100,-117,-121,-120,-66,-87,-118,-119,164,-74,-87,]),'NOT_NL_TOK':([149,169,175,201,],[-66,195,-66,228,]),'$end':([2,4,5,11,13,14,15,16,21,22,25,26,27,29,35,38,72,118,135,138,140,198,],[0,-1,-2,-99,-95,-46,-6,-92,-109,-103,-4,-46,-112,-77,-47,-5,-3,-12,-13,-14,-45,-28,]),'PYTHON_TOK':([112,124,125,128,131,132,134,144,150,153,154,156,158,162,170,171,172,179,181,183,185,189,193,196,197,205,210,214,218,219,220,224,225,233,234,235,238,239,244,245,253,254,257,258,260,265,268,269,272,273,275,276,278,279,280,283,284,289,292,293,295,296,300,301,304,307,308,310,312,],[-44,-94,-44,-79,149,-44,-44,-111,-32,-90,-44,175,-44,-44,-44,-44,-107,-91,-76,-11,-44,-44,-38,-44,-44,-44,-44,-57,-33,-37,-36,-31,-21,-59,-44,-108,-44,-44,-40,-34,-44,-11,-11,-11,-30,-43,-11,-60,-52,-25,-56,-16,-39,-44,-41,-53,-58,-61,-44,-42,-54,-63,-35,-55,-44,-11,-65,-62,-64,]),'USE_TOK':([61,76,],[78,78,]),'WITH_TOK':([94,109,159,160,209,241,291,],[-104,120,-49,-48,-9,-85,-86,]),'FALSE_TOK':([32,43,65,73,85,88,111,112,124,125,128,132,144,150,158,162,170,171,179,181,183,185,189,192,193,196,197,205,210,212,214,218,219,220,224,225,226,231,2
33,234,235,238,239,244,245,253,254,255,257,258,260,266,268,269,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[52,52,52,52,52,52,52,52,-94,52,-79,52,-111,-32,52,52,52,52,-91,-76,-11,52,52,52,-38,52,52,52,52,52,-57,-33,-37,-36,-31,-21,52,52,-59,52,-108,52,52,-40,-34,52,-11,52,-11,-11,-30,52,-11,-60,-52,-25,-56,-16,-39,52,-53,-58,-61,52,-54,-63,-35,-55,52,-11,-65,-62,-64,]),'CHECK_TOK':([0,112,124,125,128,130,132,144,150,158,162,170,171,179,181,183,185,189,193,196,197,205,210,214,218,219,220,224,225,233,234,235,238,239,244,245,253,254,257,258,260,268,269,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[1,-66,-94,-66,-79,148,-66,-111,-32,-66,-66,-66,-66,-91,-76,-11,-66,-66,-38,-66,-66,-66,-66,-57,-33,-37,-36,-31,-21,-59,-66,-108,-66,-66,-40,-34,-66,-11,-11,-11,-30,-11,-60,-52,-25,-56,-16,-39,-66,-53,-58,-61,-66,-54,-63,-35,-55,-66,-11,-65,-62,-64,]),'IDENTIFIER_TOK':([0,1,3,6,8,9,10,11,13,14,16,21,26,27,30,32,42,43,63,65,73,78,85,88,111,112,124,125,128,132,134,135,140,144,147,150,153,154,158,162,170,171,172,174,177,179,181,183,185,189,192,193,196,197,198,205,210,211,212,214,218,219,220,224,225,226,230,231,233,234,235,238,239,244,245,253,254,255,257,258,260,265,266,268,269,272,273,275,276,278,279,280,283,284,289,292,293,295,296,300,301,304,307,308,310,312,],[-22,7,-23,-24,12,17,18,20,-95,12,-92,-109,20,-112,40,45,-7,45,82,45,45,95,45,45,45,129,-94,129,-79,129,155,-13,-45,-111,165,-32,-90,155,182,129,129,129,-107,199,207,-91,-76,-11,182,129,45,-38,129,129,-28,182,182,207,45,-57,-33,-37,-36,-31,-21,45,207,45,-59,182,-108,182,182,-40,-34,182,-11,45,-11,-11,-30,-43,45,-11,-60,-52,-25,-56,-16,-39,129,-41,-53,-58,-61,129,-42,-54,-63,-35,-55,182,-11,-65,-62,-64,]),'NONE_TOK':([32,43,65,73,85,88,111,112,124,125,128,132,144,150,158,162,170,171,179,181,183,185,189,192,193,196,197,205,210,212,214,218,219,220,224,225,226,231,233,234,235,238,239,244,245,253,254,255,257,258,260,266,268,269,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[55,55,55,55,55,55,55,55,-94,55,-79,55,-111,-32,55,55,55,55,-91,-76,-11,55,55,55,-38,55,55,55,55,55,-57,-33,-37,-36,-31,-21,55,55,-59,55,-108,55,55,-40,-34,55,-11,55,-11,-11,-30,55,-11,-60,-52,-25,-56,-16,-39,55,-53,-58,-61,55,-54,-63,-35,-55,55,-11,-65,-62,-64,]),'FORALL_TOK':([112,124,125,128,132,144,150,158,162,170,171,179,181,183,185,189,193,196,197,205,210,214,218,219,220,224,225,233,234,235,238,239,244,245,253,254,257,258,260,268,269,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[133,-94,133,-79,133,-111,-32,186,133,133,133,-91,-76,-11,186,133,-38,133,133,186,186,-57,-33,-37,-36,-31,-21,-59,186,-108,186,186,-40,-34,186,-11,-11,-11,-30,-11,-60,-52,-25,-56,-16,-39,133,-53,-58,-61,133,-54,-63,-35,-55,186,-11,-65,-62,-64,]),'STRING_TOK':([32,43,65,73,85,88,111,112,124,125,128,132,144,150,158,162,170,171,179,181,183,185,189,192,193,196,197,205,210,212,214,218,219,220,224,225,226,231,233,234,235,238,239,244,245,253,254,255,257,258,260,266,268,269,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[56,56,56,56,56,56,56,56,-94,56,-79,56,-111,-32,56,56,56,56,-91,-76,-11,56,56,56,-38,56,56,56,56,56,-57,-33,-37,-36,-31,-21,56,56,-59,56,-108,56,56,-40,-34,56,-11,56,-11,-11,-30,56,-11,-60,-52,-25,-56,-16,-39,56,-53,-58,-61,56,-54,-63,-35,-55,56,-11,-65,-62,-64,]),'WHEN_TOK':([94,159,160,241,291,],[110,-49,-48,-85,-86,]),'FIRST_TOK':([112,124,125,128,132,144,150,158,162,170,171,177,179,181,183,185,189,193,196,197,205,210,214,218,219,220
,224,225,233,234,235,238,239,244,245,253,254,257,258,260,268,269,272,273,275,276,278,279,283,284,289,292,295,296,300,301,304,307,308,310,312,],[132,-94,132,-79,132,-111,-32,185,132,132,132,205,-91,-76,-11,185,132,-38,132,132,185,185,-57,-33,-37,-36,-31,-21,-59,185,-108,185,185,-40,-34,185,-11,-11,-11,-30,-11,-60,-52,-25,-56,-16,-39,132,-53,-58,-61,132,-54,-63,-35,-55,185,-11,-65,-62,-64,]),'RP_TOK':([32,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,65,67,68,69,70,71,73,74,83,84,85,86,87,88,89,90,99,101,102,103,104,105,111,115,116,117,123,136,137,192,212,221,226,231,237,247,252,255,266,270,282,],[-102,67,-78,-87,-75,-96,-74,-89,-80,72,-88,-81,-116,-20,-84,-17,67,-100,-17,-97,-93,-17,-18,-82,-17,99,-18,103,104,-18,-26,-113,-117,-110,-17,-121,-120,-114,-102,136,137,-83,142,-118,-119,-102,-102,242,-102,-102,256,263,267,-102,-102,285,294,]),'FC_EXTRAS_TOK':([13,14,27,198,],[-95,28,-112,-28,]),'NL_TOK':([0,12,17,19,20,23,24,28,31,34,36,40,41,63,64,79,82,91,92,97,98,108,110,120,126,132,133,142,149,166,167,175,178,185,186,190,191,202,203,205,217,223,242,243,246,249,256,259,263,264,267,285,286,288,290,294,302,309,],[3,-19,-105,33,-19,-27,37,39,42,59,60,-98,-17,-18,-8,96,-115,-73,107,113,114,119,122,139,145,151,152,160,168,-71,193,200,208,213,215,219,220,-72,229,232,241,244,260,261,262,265,273,277,280,281,273,273,296,298,299,273,305,311,]),}
_lr_action = { }
for _k, _v in list(_lr_action_items.items()):
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'inc_plan_vars':([183,254,257,258,268,307,],[210,210,210,210,210,210,]),'when_opt':([94,],[109,]),'bc_rules_opt':([14,26,],[25,38,]),'parent_opt':([6,],[8,]),'fc_extras':([14,],[26,]),'start_extra_statements':([33,39,60,],[58,62,77,]),'bc_rules':([8,14,26,],[11,11,11,]),'file':([0,],[4,]),'fc_premise':([112,125,132,162,170,171,189,196,197,279,292,],[124,144,150,124,124,124,144,144,144,124,144,]),'python_plan_code':([176,297,306,],[203,302,309,]),'bc_require_opt':([276,],[289,]),'plan_spec':([256,267,285,294,],[272,283,295,301,]),'goal':([78,],[94,]),'plan_extras_opt':([22,],[35,]),'pattern':([32,73,88,111,112,125,132,158,162,170,171,185,189,192,196,197,205,210,212,226,231,234,238,239,253,255,266,279,292,304,],[47,90,105,47,127,127,127,127,127,127,127,127,127,47,127,127,127,127,47,47,47,127,127,127,127,47,47,127,127,127,]),'top':([0,],[2,]),'bc_premise':([158,185,205,210,234,238,239,253,304,],[179,214,233,235,179,179,179,179,179,]),'assertion':([134,154,],[153,172,]),'name':([158,177,185,205,210,211,230,234,238,239,253,304,],[184,204,184,184,184,236,251,184,184,184,184,184,]),'data_list':([43,65,],[68,83,]),'start_python_plan_call':([273,298,],[287,303,]),'pattern_proper':([32,43,65,73,85,88,111,112,125,132,158,162,170,171,185,189,192,196,197,205,210,212,226,231,234,238,239,253,255,266,279,292,304,],[50,69,69,50,69,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,]),'python_goal':([0,],[5,]),'without_names':([30,],[41,]),'bc_extras_opt':([11,],[22,]),'start_python_statements':([139,],[157,]),'patterns_opt':([32,111,192,212,226,231,255,266,],[51,123,221,237,247,252,270,282,]),'fc_require_opt':([225,],[245,]),'python_premise':([112,125,132,158,162,170,171,185,189,196,197,205,210,234,238,239,253,279,292,304,],[128,128,128,181,128,128,128,181,128,128,128,181,181,181,181,181,181,128,128,181,]),'with_opt':([109,],[121,]),'variable':([32,43,65,66,73,85,88,100,106,111,112,125,132,158,162,170,171,185,189,192,196,197,205,210,212,226,231,234,238,239,253,255,266,279,292,304,],[53,53,53,84,53,53,53,115,117,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,]),'fc_rule':([8,14,],[13,27,]),'start_python_code':([112,125,127,132,142,149,158,162,170,171,175,185,187,189,196,197,205,210,234,238,239,253,279,292,304,],[130,130,146,130,161,169,130,130,130,130,201,130,216,130,130,130,130,130,130,130,130,130,130,130,130,]),'bc_premises':([158,234,238,239,253,304,],[183,254,257,258,268,307,]),'data':([32,43,65,73,85,88,111,112,125,132,158,162,170,171,185,189,192,196,197,205,210,212,226,231,234,238,239,253,255,266,279,292,304,],[54,70,70,54,101,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,]),'patterns_proper':([43,65,85,],[71,71,102,]),'check_nl':([112,125,132,134,154,158,162,170,171,185,189,196,197,205,210,234,238,239,253,279,292,304,],[131,131,131,156,156,131,131,131,131,131,131,131,131,131,131,131,131,131,131,131,131,131,]),'rest_opt':([71,102,],[87,116,]),'fc_rules':([8,],[14,]),'bc_rules_section':([8,14,26,],[15,29,29,]),'python_extras_code':([75,81,93,],[92,98,108,]),'nl_opt':([0,],[6,]),'python_rule_code':([148,163,164,188,195,222,228,240,248,],[167,190,191,217,223,243,249,259,264,]),'colon_opt':([12,20,],[24,34,]),'fc_premises':([112,162,170,171,279,],[125,189,196,197,292,]),'patterns':([32,111,192,212,226,231,255,266,],[57,57,57,57,57,57,57,57,]),'comma_opt':([41,57,68,71,83,102,],[64,74,86,89,86,89,]),'reset_plan_vars':([122,],[141,]),'taking':([142,],[159,]),'without_opt':([17,],[31,]),'
foreach_opt':([61,],[80,]),'bc_rule':([8,11,14,26,],[16,21,16,16,]),'start_python_assertion':([168,200,],[194,227,]),'assertions':([134,],[154,]),}
_lr_goto = { }
for _k, _v in list(_lr_goto_items.items()):
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> top","S'",1,None,None,None),
('top -> file','top',1,'p_top','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',40),
('top -> python_goal','top',1,'p_top','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',41),
('python_goal -> CHECK_TOK IDENTIFIER_TOK . IDENTIFIER_TOK LP_TOK patterns_opt RP_TOK','python_goal',7,'p_goal','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',46),
('file -> nl_opt parent_opt fc_rules bc_rules_opt','file',4,'p_file','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',51),
('file -> nl_opt parent_opt fc_rules fc_extras bc_rules_opt','file',5,'p_file_fc_extras','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',56),
('file -> nl_opt parent_opt bc_rules_section','file',3,'p_file_bc','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',61),
('parent_opt -> EXTENDING_TOK IDENTIFIER_TOK without_opt NL_TOK','parent_opt',4,'p_parent','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',72),
('without_opt -> WITHOUT_TOK without_names comma_opt','without_opt',3,'p_second','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',77),
('when_opt -> WHEN_TOK NL_TOK reset_plan_vars INDENT_TOK bc_premises DEINDENT_TOK','when_opt',6,'p_fourth','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',82),
('reset_plan_vars -> <empty>','reset_plan_vars',0,'p_reset_plan_vars','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',87),
('inc_plan_vars -> <empty>','inc_plan_vars',0,'p_inc_plan_vars','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',95),
('bc_extras_opt -> BC_EXTRAS_TOK NL_TOK start_extra_statements INDENT_TOK python_extras_code NL_TOK DEINDENT_TOK','bc_extras_opt',7,'p_fifth','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',103),
('fc_extras -> FC_EXTRAS_TOK NL_TOK start_extra_statements INDENT_TOK python_extras_code NL_TOK DEINDENT_TOK','fc_extras',7,'p_fifth','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',104),
('plan_extras_opt -> PLAN_EXTRAS_TOK NL_TOK start_extra_statements INDENT_TOK python_extras_code NL_TOK DEINDENT_TOK','plan_extras_opt',7,'p_fifth','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',105),
('with_opt -> WITH_TOK NL_TOK start_python_statements INDENT_TOK python_plan_code NL_TOK DEINDENT_TOK','with_opt',7,'p_fifth','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',106),
('bc_require_opt -> <empty>','bc_require_opt',0,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',111),
('comma_opt -> <empty>','comma_opt',0,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',112),
('comma_opt -> ,','comma_opt',1,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',113),
('colon_opt -> <empty>','colon_opt',0,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',114),
('data -> NONE_TOK','data',1,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',115),
('fc_require_opt -> <empty>','fc_require_opt',0,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',116),
('nl_opt -> <empty>','nl_opt',0,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',117),
('nl_opt -> NL_TOK','nl_opt',1,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',118),
('parent_opt -> <empty>','parent_opt',0,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',119),
('plan_spec -> NL_TOK','plan_spec',1,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',120),
('rest_opt -> comma_opt','rest_opt',1,'p_none','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',121),
('colon_opt -> :','colon_opt',1,'p_colon_deprication','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',126),
('fc_rule -> IDENTIFIER_TOK colon_opt NL_TOK INDENT_TOK foreach_opt ASSERT_TOK NL_TOK INDENT_TOK assertions DEINDENT_TOK DEINDENT_TOK','fc_rule',11,'p_fc_rule','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',134),
('foreach_opt -> FOREACH_TOK NL_TOK INDENT_TOK fc_premises DEINDENT_TOK','foreach_opt',5,'p_foreach','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',139),
('fc_premise -> IDENTIFIER_TOK . IDENTIFIER_TOK LP_TOK patterns_opt RP_TOK NL_TOK','fc_premise',7,'p_fc_premise','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',144),
('fc_premise -> FIRST_TOK NL_TOK INDENT_TOK fc_premises DEINDENT_TOK','fc_premise',5,'p_fc_first_1','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',149),
('fc_premise -> FIRST_TOK fc_premise','fc_premise',2,'p_fc_first_n','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',154),
('fc_premise -> NOTANY_TOK NL_TOK INDENT_TOK fc_premises DEINDENT_TOK','fc_premise',5,'p_fc_notany','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',159),
('fc_premise -> FORALL_TOK NL_TOK INDENT_TOK fc_premises DEINDENT_TOK fc_require_opt','fc_premise',6,'p_fc_forall','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',164),
('fc_require_opt -> REQUIRE_TOK NL_TOK INDENT_TOK fc_premises DEINDENT_TOK','fc_require_opt',5,'p_fc_require_opt','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',169),
('python_premise -> pattern start_python_code = python_rule_code NL_TOK','python_premise',5,'p_python_eq','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',174),
('python_premise -> pattern start_python_code IN_TOK python_rule_code NL_TOK','python_premise',5,'p_python_in','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',179),
('python_premise -> start_python_code CHECK_TOK python_rule_code NL_TOK','python_premise',4,'p_python_check','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',184),
('python_premise -> check_nl PYTHON_TOK NL_TOK start_python_assertion INDENT_TOK python_rule_code NL_TOK DEINDENT_TOK','python_premise',8,'p_python_block_n','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',189),
('python_premise -> check_nl PYTHON_TOK start_python_code NOT_NL_TOK python_rule_code NL_TOK','python_premise',6,'p_python_block_1','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',194),
('assertion -> IDENTIFIER_TOK . IDENTIFIER_TOK LP_TOK patterns_opt RP_TOK NL_TOK','assertion',7,'p_assertion','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',199),
('assertion -> check_nl PYTHON_TOK NL_TOK start_python_assertion INDENT_TOK python_rule_code NL_TOK DEINDENT_TOK','assertion',8,'p_python_assertion_n','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',204),
('assertion -> check_nl PYTHON_TOK start_python_code NOT_NL_TOK python_rule_code NL_TOK','assertion',6,'p_python_assertion_1','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',209),
('check_nl -> <empty>','check_nl',0,'p_check_nl','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',214),
('bc_rule -> IDENTIFIER_TOK colon_opt NL_TOK INDENT_TOK USE_TOK goal when_opt with_opt DEINDENT_TOK','bc_rule',9,'p_bc_rule','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',220),
('bc_rules_opt -> <empty>','bc_rules_opt',0,'p_empty_bc_rules_opt','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',225),
('bc_rules_section -> bc_rules bc_extras_opt plan_extras_opt','bc_rules_section',3,'p_bc_rules_section','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',230),
('goal -> IDENTIFIER_TOK LP_TOK patterns_opt RP_TOK NL_TOK','goal',5,'p_goal_no_taking','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',235),
('goal -> IDENTIFIER_TOK LP_TOK patterns_opt RP_TOK taking','goal',5,'p_goal_taking','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',240),
('name -> IDENTIFIER_TOK','name',1,'p_name_sym','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',245),
('name -> PATTERN_VAR_TOK','name',1,'p_name_pat_var','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',250),
('bc_premise -> name LP_TOK patterns_opt RP_TOK plan_spec','bc_premise',5,'p_bc_premise1','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',255),
('bc_premise -> ! name LP_TOK patterns_opt RP_TOK plan_spec','bc_premise',6,'p_bc_premise2','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',261),
('bc_premise -> name . name LP_TOK patterns_opt RP_TOK plan_spec','bc_premise',7,'p_bc_premise3','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',267),
('bc_premise -> ! name . name LP_TOK patterns_opt RP_TOK plan_spec','bc_premise',8,'p_bc_premise4','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',273),
('bc_premise -> FIRST_TOK NL_TOK INDENT_TOK bc_premises DEINDENT_TOK','bc_premise',5,'p_bc_first_1f','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',279),
('bc_premise -> FIRST_TOK bc_premise','bc_premise',2,'p_bc_first_nf','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',284),
('bc_premise -> ! FIRST_TOK NL_TOK INDENT_TOK bc_premises DEINDENT_TOK','bc_premise',6,'p_bc_first_1t','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',289),
('bc_premise -> ! FIRST_TOK bc_premise','bc_premise',3,'p_bc_first_nt','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',294),
('bc_premise -> NOTANY_TOK NL_TOK INDENT_TOK bc_premises DEINDENT_TOK','bc_premise',5,'p_bc_notany','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',299),
('bc_premise -> FORALL_TOK NL_TOK INDENT_TOK bc_premises DEINDENT_TOK bc_require_opt','bc_premise',6,'p_bc_forall','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',304),
('bc_require_opt -> REQUIRE_TOK NL_TOK INDENT_TOK bc_premises DEINDENT_TOK','bc_require_opt',5,'p_bc_require_opt','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',309),
('plan_spec -> AS_TOK PATTERN_VAR_TOK NL_TOK','plan_spec',3,'p_as','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',314),
('plan_spec -> STEP_TOK NUMBER_TOK NL_TOK start_python_plan_call INDENT_TOK python_plan_code NL_TOK DEINDENT_TOK','plan_spec',8,'p_step_code','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',319),
('plan_spec -> NL_TOK start_python_plan_call INDENT_TOK python_plan_code NL_TOK DEINDENT_TOK','plan_spec',6,'p_code','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',325),
('start_python_code -> <empty>','start_python_code',0,'p_start_python_code','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',330),
('start_python_plan_call -> <empty>','start_python_plan_call',0,'p_start_python_plan_call','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',336),
('start_python_statements -> <empty>','start_python_statements',0,'p_start_python_statements','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',342),
('start_extra_statements -> <empty>','start_extra_statements',0,'p_start_extra_statements','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',348),
('start_python_assertion -> <empty>','start_python_assertion',0,'p_start_python_assertion','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',354),
('python_rule_code -> CODE_TOK','python_rule_code',1,'p_python_rule_code','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',361),
('python_plan_code -> CODE_TOK','python_plan_code',1,'p_python_plan_code','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',366),
('python_extras_code -> CODE_TOK','python_extras_code',1,'p_python_extras_code','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',371),
('variable -> PATTERN_VAR_TOK','variable',1,'p_pattern_var','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',376),
('variable -> ANONYMOUS_VAR_TOK','variable',1,'p_anonymous_var','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',386),
('bc_premise -> python_premise','bc_premise',1,'p_first','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',394),
('bc_rules_opt -> bc_rules_section','bc_rules_opt',1,'p_first','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',395),
('data -> NUMBER_TOK','data',1,'p_first','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',396),
('fc_premise -> python_premise','fc_premise',1,'p_first','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',397),
('pattern -> pattern_proper','pattern',1,'p_first','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',398),
('pattern_proper -> variable','pattern_proper',1,'p_first','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',399),
('patterns_opt -> patterns comma_opt','patterns_opt',2,'p_first','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',400),
('rest_opt -> , * variable','rest_opt',3,'p_last','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',405),
('data -> STRING_TOK','data',1,'p_data_string','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',410),
('taking -> start_python_code TAKING_TOK python_rule_code NL_TOK','taking',4,'p_taking','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',421),
('taking -> NL_TOK INDENT_TOK start_python_code TAKING_TOK python_rule_code NL_TOK DEINDENT_TOK','taking',7,'p_taking2','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',426),
('data -> IDENTIFIER_TOK','data',1,'p_quoted_last','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',431),
('data -> FALSE_TOK','data',1,'p_false','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',439),
('data -> TRUE_TOK','data',1,'p_true','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',444),
('assertions -> assertion','assertions',1,'p_start_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',449),
('bc_premises -> bc_premise','bc_premises',1,'p_start_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',450),
('bc_rules -> bc_rule','bc_rules',1,'p_start_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',451),
('data_list -> data','data_list',1,'p_start_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',452),
('fc_premises -> fc_premise','fc_premises',1,'p_start_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',453),
('fc_rules -> fc_rule','fc_rules',1,'p_start_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',454),
('patterns -> pattern','patterns',1,'p_start_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',455),
('patterns_proper -> pattern_proper','patterns_proper',1,'p_start_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',456),
('without_names -> IDENTIFIER_TOK','without_names',1,'p_start_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',457),
('bc_extras_opt -> <empty>','bc_extras_opt',0,'p_empty_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',462),
('data -> LP_TOK RP_TOK','data',2,'p_empty_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',463),
('foreach_opt -> <empty>','foreach_opt',0,'p_empty_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',464),
('patterns_opt -> <empty>','patterns_opt',0,'p_empty_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',465),
('plan_extras_opt -> <empty>','plan_extras_opt',0,'p_empty_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',466),
('when_opt -> <empty>','when_opt',0,'p_empty_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',467),
('without_opt -> <empty>','without_opt',0,'p_empty_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',468),
('with_opt -> <empty>','with_opt',0,'p_double_empty_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',473),
('assertions -> assertions assertion','assertions',2,'p_append_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',478),
('bc_premises -> bc_premises inc_plan_vars bc_premise','bc_premises',3,'p_append_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',479),
('bc_rules -> bc_rules bc_rule','bc_rules',2,'p_append_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',480),
('data_list -> data_list , data','data_list',3,'p_append_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',481),
('fc_premises -> fc_premises fc_premise','fc_premises',2,'p_append_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',482),
('fc_rules -> fc_rules fc_rule','fc_rules',2,'p_append_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',483),
('patterns -> patterns , pattern','patterns',3,'p_append_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',484),
('patterns_proper -> patterns_proper , pattern','patterns_proper',3,'p_append_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',485),
('without_names -> without_names , IDENTIFIER_TOK','without_names',3,'p_append_list','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',486),
('pattern -> data','pattern',1,'p_pattern','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',492),
('pattern_proper -> LP_TOK * variable RP_TOK','pattern_proper',4,'p_pattern_tuple1','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',499),
('pattern_proper -> LP_TOK data_list , * variable RP_TOK','pattern_proper',6,'p_pattern_tuple2','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',506),
('pattern_proper -> LP_TOK data_list , patterns_proper rest_opt RP_TOK','pattern_proper',6,'p_pattern_tuple3','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',518),
('pattern_proper -> LP_TOK patterns_proper rest_opt RP_TOK','pattern_proper',4,'p_pattern_tuple4','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',534),
('data -> LP_TOK data_list comma_opt RP_TOK','data',4,'p_tuple','/home/bruce/python/workareas/pyke-hg/r1_working/pyke/krb_compiler/krbparser.py',543),
]
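The two loops above regroup the generated token-major item tables into the state-major lookup dictionaries the generated LALR parser uses at parse time; a stand-alone illustration of that inversion on toy data (not the real tables):

# Each token maps to (states, actions); regroup as state -> {token: action}.
items = {"NUMBER_TOK": ([0, 2], [5, -3]), "NL_TOK": ([0], [1])}
table = {}
for tok, (states, actions) in items.items():
    for state, action in zip(states, actions):
        table.setdefault(state, {})[tok] = action
print(table)  # {0: {'NUMBER_TOK': 5, 'NL_TOK': 1}, 2: {'NUMBER_TOK': -3}}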
| 243.238411
| 12,017
| 0.721283
| 7,265
| 36,729
| 3.478458
| 0.07075
| 0.043449
| 0.072415
| 0.115864
| 0.672827
| 0.64849
| 0.620553
| 0.596375
| 0.58023
| 0.569032
| 0
| 0.274274
| 0.026981
| 36,729
| 150
| 12,018
| 244.86
| 0.432845
| 0.003703
| 0
| 0.014184
| 1
| 0.858156
| 0.506928
| 0.273743
| 0
| 0
| 0
| 0
| 0.070922
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0c14a4972373d6e64a2114e6bc1e20049e46ea58
| 652
|
py
|
Python
|
src/spaceone/monitoring/info/__init__.py
|
jean1042/monitoring
|
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
|
[
"Apache-2.0"
] | 5
|
2020-06-04T23:01:30.000Z
|
2020-09-09T08:58:51.000Z
|
src/spaceone/monitoring/info/__init__.py
|
jean1042/monitoring
|
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
|
[
"Apache-2.0"
] | 8
|
2021-11-12T08:13:00.000Z
|
2022-03-28T11:13:12.000Z
|
src/spaceone/monitoring/info/__init__.py
|
jean1042/monitoring
|
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
|
[
"Apache-2.0"
] | 7
|
2020-06-10T01:56:35.000Z
|
2021-12-02T05:36:21.000Z
|
from spaceone.monitoring.info.common_info import *
from spaceone.monitoring.info.data_source_info import *
from spaceone.monitoring.info.metric_info import *
from spaceone.monitoring.info.log_info import *
from spaceone.monitoring.info.project_alert_config_info import *
from spaceone.monitoring.info.escalation_policy_info import *
from spaceone.monitoring.info.event_rule_info import *
from spaceone.monitoring.info.webhook_info import *
from spaceone.monitoring.info.maintenance_window_info import *
from spaceone.monitoring.info.alert_info import *
from spaceone.monitoring.info.note_info import *
from spaceone.monitoring.info.event_info import *
| 50.153846
| 64
| 0.852761
| 90
| 652
| 5.977778
| 0.233333
| 0.267658
| 0.490706
| 0.579926
| 0.754647
| 0.754647
| 0.152416
| 0
| 0
| 0
| 0
| 0
| 0.07362
| 652
| 12
| 65
| 54.333333
| 0.890728
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0c1a0fecf34dace52dc6da81e3a24b419c9e73d5
| 105
|
py
|
Python
|
inclusive_django_range_fields/drf/__init__.py
|
Hipo/inclusive-django-range-fields
|
f40d915fc8bfbfda5cba59fcabb1831fae486fd4
|
[
"MIT"
] | 16
|
2019-12-19T13:35:54.000Z
|
2021-08-16T20:59:45.000Z
|
inclusive_django_range_fields/drf/__init__.py
|
Hipo/inclusive-django-range-fields
|
f40d915fc8bfbfda5cba59fcabb1831fae486fd4
|
[
"MIT"
] | 1
|
2020-02-07T11:39:38.000Z
|
2020-02-07T11:39:38.000Z
|
inclusive_django_range_fields/drf/__init__.py
|
Hipo/inclusive-django-range-fields
|
f40d915fc8bfbfda5cba59fcabb1831fae486fd4
|
[
"MIT"
] | null | null | null |
from inclusive_django_range_fields.drf.fields import InclusiveIntegerRangeField, InclusiveDateRangeField
| 52.5
| 104
| 0.92381
| 10
| 105
| 9.4
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 105
| 1
| 105
| 105
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c3f5db4183a8fb4a833799f5fc1535a3848610e
| 5,128
|
py
|
Python
|
tests/rules/test_IAMRolesOverprivilegedRule.py
|
lpmi-13/cfripper
|
36bfdc45855112496977806e1a93d98d010399ed
|
[
"Apache-2.0"
] | 360
|
2018-08-08T12:34:58.000Z
|
2022-03-25T17:01:41.000Z
|
tests/rules/test_IAMRolesOverprivilegedRule.py
|
lpmi-13/cfripper
|
36bfdc45855112496977806e1a93d98d010399ed
|
[
"Apache-2.0"
] | 40
|
2018-11-26T07:08:15.000Z
|
2022-03-02T09:10:45.000Z
|
tests/rules/test_IAMRolesOverprivilegedRule.py
|
lpmi-13/cfripper
|
36bfdc45855112496977806e1a93d98d010399ed
|
[
"Apache-2.0"
] | 51
|
2018-11-09T11:46:32.000Z
|
2022-03-28T08:47:28.000Z
|
import pytest
from pycfmodel.model.cf_model import CFModel

from cfripper.model.enums import RuleGranularity, RuleMode, RuleRisk
from cfripper.model.result import Failure
from cfripper.rules.iam_roles import IAMRolesOverprivilegedRule
from tests.utils import compare_lists_of_failures, get_cfmodel_from


@pytest.fixture()
def valid_role_inline_policy() -> CFModel:
    return get_cfmodel_from("rules/IAMRolesOverprivilegedRule/valid_role_inline_policy.json").resolve()


@pytest.fixture()
def invalid_role_inline_policy() -> CFModel:
    return get_cfmodel_from("rules/IAMRolesOverprivilegedRule/invalid_role_inline_policy.json").resolve()


@pytest.fixture()
def invalid_role_inline_policy_resource_as_array() -> CFModel:
    return get_cfmodel_from(
        "rules/IAMRolesOverprivilegedRule/invalid_role_inline_policy_resource_as_array.json"
    ).resolve()


@pytest.fixture()
def valid_role_managed_policy() -> CFModel:
    return get_cfmodel_from("rules/IAMRolesOverprivilegedRule/valid_role_managed_policy.json").resolve()


@pytest.fixture()
def invalid_role_managed_policy() -> CFModel:
    return get_cfmodel_from("rules/IAMRolesOverprivilegedRule/invalid_role_managed_policy.json").resolve()


@pytest.fixture()
def invalid_role_inline_policy_fn_if() -> CFModel:
    return get_cfmodel_from("rules/IAMRolesOverprivilegedRule/invalid_role_inline_policy_fn_if.json").resolve()


def test_with_valid_role_inline_policy(valid_role_inline_policy):
    rule = IAMRolesOverprivilegedRule(None)
    result = rule.invoke(valid_role_inline_policy)
    assert result.valid
    assert compare_lists_of_failures(result.failures, [])


def test_with_invalid_role_inline_policy(invalid_role_inline_policy):
    rule = IAMRolesOverprivilegedRule(None)
    result = rule.invoke(invalid_role_inline_policy)
    assert not result.valid
    assert compare_lists_of_failures(
        result.failures,
        [
            Failure(
                granularity=RuleGranularity.RESOURCE,
                reason="Role 'RootRole' contains an insecure permission 'ec2:DeleteInternetGateway' in policy 'not_so_chill_policy'",
                risk_value=RuleRisk.MEDIUM,
                rule="IAMRolesOverprivilegedRule",
                rule_mode=RuleMode.BLOCKING,
                actions=None,
                resource_ids={"RootRole"},
            )
        ],
    )


def test_with_invalid_role_inline_policy_resource_as_array(invalid_role_inline_policy_resource_as_array):
    rule = IAMRolesOverprivilegedRule(None)
    result = rule.invoke(invalid_role_inline_policy_resource_as_array)
    assert not result.valid
    assert compare_lists_of_failures(
        result.failures,
        [
            Failure(
                granularity=RuleGranularity.RESOURCE,
                reason="Role 'RootRole' contains an insecure permission 'ec2:DeleteInternetGateway' in policy 'not_so_chill_policy'",
                risk_value=RuleRisk.MEDIUM,
                rule="IAMRolesOverprivilegedRule",
                rule_mode=RuleMode.BLOCKING,
                actions=None,
                resource_ids={"RootRole"},
            )
        ],
    )


def test_with_valid_role_managed_policy(valid_role_managed_policy):
    rule = IAMRolesOverprivilegedRule(None)
    result = rule.invoke(valid_role_managed_policy)
    assert result.valid
    assert compare_lists_of_failures(result.failures, [])


def test_with_invalid_role_managed_policy(invalid_role_managed_policy):
    rule = IAMRolesOverprivilegedRule(None)
    result = rule.invoke(invalid_role_managed_policy)
    assert not result.valid
    assert compare_lists_of_failures(
        result.failures,
        [
            Failure(
                granularity=RuleGranularity.RESOURCE,
                reason="Role RootRole has forbidden Managed Policy arn:aws:iam::aws:policy/AdministratorAccess",
                risk_value=RuleRisk.MEDIUM,
                rule="IAMRolesOverprivilegedRule",
                rule_mode=RuleMode.BLOCKING,
                actions=None,
                resource_ids={"RootRole"},
            )
        ],
    )


def test_with_invalid_role_inline_policy_fn_if(invalid_role_inline_policy_fn_if):
    rule = IAMRolesOverprivilegedRule(None)
    result = rule.invoke(invalid_role_inline_policy_fn_if)
    assert not result.valid
    assert compare_lists_of_failures(
        result.failures,
        [
            Failure(
                granularity=RuleGranularity.RESOURCE,
                reason="Role 'RootRole' contains an insecure permission 'ec2:DeleteVpc' in policy 'ProdCredentialStoreAccessPolicy'",
                risk_value=RuleRisk.MEDIUM,
                rule="IAMRolesOverprivilegedRule",
                rule_mode=RuleMode.BLOCKING,
                actions=None,
                resource_ids={"RootRole"},
            )
        ],
    )


def test_rule_supports_filter_config(invalid_role_managed_policy, default_allow_all_config):
    rule = IAMRolesOverprivilegedRule(default_allow_all_config)
    result = rule.invoke(invalid_role_managed_policy)
    assert result.valid
    assert compare_lists_of_failures(result.failures, [])
| 133
| 0.713534
| 544
| 5,128
| 6.356618
| 0.147059
| 0.069983
| 0.092539
| 0.099769
| 0.836032
| 0.812319
| 0.799884
| 0.768074
| 0.759399
| 0.689705
| 0
| 0.000742
| 0.211388
| 5,128
| 147
| 134
| 34.884354
| 0.854352
| 0
| 0
| 0.584071
| 0
| 0
| 0.185062
| 0.132995
| 0
| 0
| 0
| 0
| 0.123894
| 1
| 0.115044
| false
| 0
| 0.053097
| 0.053097
| 0.221239
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a749a396f15188ef345b4ae7c53017b6004c5e71
| 52,457
|
py
|
Python
|
tensorflow/contrib/bayesflow/python/ops/layers_dense_variational_impl.py
|
harunpehlivan/tensorflow
|
376e2cfdab31f4da251ea2e50992a9bf97fd171b
|
[
"Apache-2.0"
] | 22
|
2018-01-13T14:52:47.000Z
|
2018-07-05T01:00:28.000Z
|
tensorflow/contrib/bayesflow/python/ops/layers_dense_variational_impl.py
|
hamzabekkouri/tensorflow
|
d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/bayesflow/python/ops/layers_dense_variational_impl.py
|
hamzabekkouri/tensorflow
|
d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f
|
[
"Apache-2.0"
] | 3
|
2018-01-20T06:47:34.000Z
|
2018-05-07T19:14:34.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dense Bayesian layer using KL-divergence based variational inference.
@@DenseReparameterization
@@DenseLocalReparameterization
@@DenseFlipout
@@dense_reparameterization
@@dense_local_reparameterization
@@dense_flipout
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.bayesflow.python.ops import layers_util
from tensorflow.contrib.distributions.python.ops import independent as independent_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops.distributions import kullback_leibler as kl_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = [
"DenseReparameterization",
"DenseLocalReparameterization",
"DenseFlipout",
"dense_reparameterization",
"dense_local_reparameterization",
"dense_flipout",
]
class _DenseVariational(layers_lib.Layer):
"""Abstract densely-connected class (private, used as implementation base).
This layer implements the Bayesian variational inference analogue to
a dense layer by assuming the `kernel` and/or the `bias` are drawn
from distributions. By default, the layer implements a stochastic
forward pass via sampling from the kernel and bias posteriors,
```none
kernel, bias ~ posterior
outputs = activation(matmul(inputs, kernel) + bias)
```
The arguments permit separate specification of the surrogate posterior
(`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`
distributions.
Args:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (`callable`). Set it to None to maintain a
linear activation.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
kernel_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `kernel` parameter. Default value:
`default_mean_field_normal_fn()`.
kernel_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
kernel_prior_fn: Python `callable` which creates `tf.distributions`
instance. See `default_mean_field_normal_fn` docstring for required
parameter signature.
Default value: `tf.distributions.Normal(loc=0., scale=1.)`.
kernel_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
bias_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `bias` parameter. Default value:
`default_mean_field_normal_fn(is_singular=True)` (which creates an
instance of `tf.distributions.Deterministic`).
bias_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
bias_prior_fn: Python `callable` which creates `tf.distributions` instance.
See `default_mean_field_normal_fn` docstring for required parameter
signature. Default value: `None` (no prior, no variational inference)
bias_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
name: Python `str`, the name of the layer. Layers with the same name will
share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in
such cases.
reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous
layer by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (`callable`).
activity_regularizer: Regularizer function for the output.
kernel_posterior_fn: `callable` returning posterior.
kernel_posterior_tensor_fn: `callable` operating on posterior.
kernel_prior_fn: `callable` returning prior.
kernel_divergence_fn: `callable` returning divergence.
bias_posterior_fn: `callable` returning posterior.
bias_posterior_tensor_fn: `callable` operating on posterior.
bias_prior_fn: `callable` returning prior.
bias_divergence_fn: `callable` returning divergence.
"""
def __init__(
self,
units,
activation=None,
activity_regularizer=None,
trainable=True,
kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
kernel_posterior_tensor_fn=lambda d: d.sample(),
kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda
loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
bias_posterior_fn=layers_util.default_mean_field_normal_fn(is_singular=True), # pylint: disable=line-too-long
bias_posterior_tensor_fn=lambda d: d.sample(),
bias_prior_fn=None,
bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
name=None,
**kwargs):
super(_DenseVariational, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=activity_regularizer,
**kwargs)
self.units = units
self.activation = activation
self.input_spec = layers_lib.InputSpec(min_ndim=2)
self.kernel_posterior_fn = kernel_posterior_fn
self.kernel_posterior_tensor_fn = kernel_posterior_tensor_fn
self.kernel_prior_fn = kernel_prior_fn
self.kernel_divergence_fn = kernel_divergence_fn
self.bias_posterior_fn = bias_posterior_fn
self.bias_posterior_tensor_fn = bias_posterior_tensor_fn
self.bias_prior_fn = bias_prior_fn
self.bias_divergence_fn = bias_divergence_fn
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
in_size = input_shape.with_rank_at_least(2)[-1].value
if in_size is None:
raise ValueError("The last dimension of the inputs to `Dense` "
"should be defined. Found `None`.")
self._input_spec = layers_lib.InputSpec(min_ndim=2, axes={-1: in_size})
dtype = dtypes.as_dtype(self.dtype)
# Must have a posterior kernel.
self.kernel_posterior = self.kernel_posterior_fn(
dtype, [in_size, self.units], "kernel_posterior",
self.trainable, self.add_variable)
if self.kernel_prior_fn is None:
self.kernel_prior = None
else:
self.kernel_prior = self.kernel_prior_fn(
dtype, [in_size, self.units], "kernel_prior",
self.trainable, self.add_variable)
self._built_kernel_divergence = False
if self.bias_posterior_fn is None:
self.bias_posterior = None
else:
self.bias_posterior = self.bias_posterior_fn(
dtype, [self.units], "bias_posterior",
self.trainable, self.add_variable)
if self.bias_prior_fn is None:
self.bias_prior = None
else:
self.bias_prior = self.bias_prior_fn(
dtype, [self.units], "bias_prior",
self.trainable, self.add_variable)
self._built_bias_divergence = False
self.built = True
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
outputs = self._apply_variational_kernel(inputs)
outputs = self._apply_variational_bias(outputs)
if self.activation is not None:
outputs = self.activation(outputs) # pylint: disable=not-callable
if not self._built_kernel_divergence:
kernel_posterior = self.kernel_posterior
kernel_prior = self.kernel_prior
if isinstance(self.kernel_posterior, independent_lib.Independent):
kernel_posterior = kernel_posterior.distribution
if isinstance(self.kernel_prior, independent_lib.Independent):
kernel_prior = kernel_prior.distribution
self._apply_divergence(self.kernel_divergence_fn,
kernel_posterior,
kernel_prior,
self.kernel_posterior_tensor,
name="divergence_kernel")
self._built_kernel_divergence = True
if not self._built_bias_divergence:
bias_posterior = self.bias_posterior
bias_prior = self.bias_prior
if isinstance(self.bias_posterior, independent_lib.Independent):
bias_posterior = bias_posterior.distribution
if isinstance(self.bias_prior, independent_lib.Independent):
bias_prior = bias_prior.distribution
self._apply_divergence(self.bias_divergence_fn,
bias_posterior,
bias_prior,
self.bias_posterior_tensor,
name="divergence_bias")
self._built_bias_divergence = True
return outputs
def _apply_variational_bias(self, inputs):
if self.bias_posterior is None:
self.bias_posterior_tensor = None
return inputs
self.bias_posterior_tensor = self.bias_posterior_tensor_fn(
self.bias_posterior)
return nn.bias_add(inputs, self.bias_posterior_tensor)
def _apply_divergence(self, divergence_fn, posterior, prior,
posterior_tensor, name):
if (divergence_fn is None or
posterior is None or
prior is None):
divergence = None
return
divergence = standard_ops.identity(
divergence_fn(
posterior, prior, posterior_tensor),
name=name)
self.add_loss(divergence)
def _matmul(self, inputs, kernel):
if inputs.shape.ndims <= 2:
return standard_ops.matmul(inputs, kernel)
# To handle broadcasting, we must use `tensordot`.
return standard_ops.tensordot(inputs, kernel, axes=[[-1], [0]])
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).with_rank_at_least(2)
if input_shape[-1].value is None:
raise ValueError(
"The innermost dimension of input_shape must be defined, "
"but saw: {}".format(input_shape))
return input_shape[:-1].concatenate(self.units)
class DenseReparameterization(_DenseVariational):
"""Densely-connected layer class with reparameterization estimator.
This layer implements the Bayesian variational inference analogue to
a dense layer by assuming the `kernel` and/or the `bias` are drawn
from distributions. By default, the layer implements a stochastic
forward pass via sampling from the kernel and bias posteriors,
```none
kernel, bias ~ posterior
outputs = activation(matmul(inputs, kernel) + bias)
```
The arguments permit separate specification of the surrogate posterior
(`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`
distributions.
Args:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (`callable`). Set it to None to maintain a
linear activation.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
kernel_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `kernel` parameter. Default value:
`default_mean_field_normal_fn()`.
kernel_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
kernel_prior_fn: Python `callable` which creates `tf.distributions`
instance. See `default_mean_field_normal_fn` docstring for required
parameter signature.
Default value: `tf.distributions.Normal(loc=0., scale=1.)`.
kernel_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
bias_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `bias` parameter. Default value:
`default_mean_field_normal_fn(is_singular=True)` (which creates an
instance of `tf.distributions.Deterministic`).
bias_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
bias_prior_fn: Python `callable` which creates `tf.distributions` instance.
See `default_mean_field_normal_fn` docstring for required parameter
signature. Default value: `None` (no prior, no variational inference)
bias_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
name: Python `str`, the name of the layer. Layers with the same name will
share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in
such cases.
reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous
layer by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (`callable`).
activity_regularizer: Regularizer function for the output.
kernel_posterior_fn: `callable` returning posterior.
kernel_posterior_tensor_fn: `callable` operating on posterior.
kernel_prior_fn: `callable` returning prior.
kernel_divergence_fn: `callable` returning divergence.
bias_posterior_fn: `callable` returning posterior.
bias_posterior_tensor_fn: `callable` operating on posterior.
bias_prior_fn: `callable` returning prior.
bias_divergence_fn: `callable` returning divergence.
#### Examples
We illustrate a Bayesian neural network with [variational inference](
https://en.wikipedia.org/wiki/Variational_Bayesian_methods),
assuming a dataset of `features` and `labels`.
```python
tfp = tf.contrib.bayesflow
net = tfp.layers.DenseReparameterization(
512, activation=tf.nn.relu)(features)
logits = tfp.layers.DenseReparameterization(10)(net)
neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
loss = neg_log_likelihood + kl
train_op = tf.train.AdamOptimizer().minimize(loss)
```
It uses reparameterization gradients to minimize the
Kullback-Leibler divergence up to a constant, also known as the
negative Evidence Lower Bound. It consists of the sum of two terms:
the expected negative log-likelihood, which we approximate via
Monte Carlo; and the KL divergence, which is added via regularizer
terms which are arguments to the layer.
"""
def __init__(
self,
units,
activation=None,
activity_regularizer=None,
trainable=True,
kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
kernel_posterior_tensor_fn=lambda d: d.sample(),
kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda
loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
bias_posterior_fn=layers_util.default_mean_field_normal_fn(
is_singular=True),
bias_posterior_tensor_fn=lambda d: d.sample(),
bias_prior_fn=None,
bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
name=None,
**kwargs):
super(DenseReparameterization, self).__init__(
units=units,
activation=activation,
activity_regularizer=activity_regularizer,
trainable=trainable,
kernel_posterior_fn=kernel_posterior_fn,
kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
kernel_prior_fn=kernel_prior_fn,
kernel_divergence_fn=kernel_divergence_fn,
bias_posterior_fn=bias_posterior_fn,
bias_posterior_tensor_fn=bias_posterior_tensor_fn,
bias_prior_fn=bias_prior_fn,
bias_divergence_fn=bias_divergence_fn,
name=name,
**kwargs)
def _apply_variational_kernel(self, inputs):
self.kernel_posterior_tensor = self.kernel_posterior_tensor_fn(
self.kernel_posterior)
self.kernel_posterior_affine = None
self.kernel_posterior_affine_tensor = None
return self._matmul(inputs, self.kernel_posterior_tensor)
def dense_reparameterization(
inputs,
units,
activation=None,
activity_regularizer=None,
trainable=True,
kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
kernel_posterior_tensor_fn=lambda d: d.sample(),
kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda
loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
bias_posterior_fn=layers_util.default_mean_field_normal_fn(is_singular=True), # pylint: disable=line-too-long
bias_posterior_tensor_fn=lambda d: d.sample(),
bias_prior_fn=None,
bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
name=None,
reuse=None):
"""Densely-connected layer with reparameterization estimator.
This layer implements the Bayesian variational inference analogue to
a dense layer by assuming the `kernel` and/or the `bias` are drawn
from distributions. By default, the layer implements a stochastic
forward pass via sampling from the kernel and bias posteriors,
```none
kernel, bias ~ posterior
outputs = activation(matmul(inputs, kernel) + bias)
```
The arguments permit separate specification of the surrogate posterior
(`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`
distributions.
Args:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (`callable`). Set it to None to maintain a
linear activation.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
kernel_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `kernel` parameter. Default value:
`default_mean_field_normal_fn()`.
kernel_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
kernel_prior_fn: Python `callable` which creates `tf.distributions`
instance. See `default_mean_field_normal_fn` docstring for required
parameter signature.
Default value: `tf.distributions.Normal(loc=0., scale=1.)`.
kernel_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
bias_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `bias` parameter. Default value:
`default_mean_field_normal_fn(is_singular=True)` (which creates an
instance of `tf.distributions.Deterministic`).
bias_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
bias_prior_fn: Python `callable` which creates `tf.distributions` instance.
See `default_mean_field_normal_fn` docstring for required parameter
signature. Default value: `None` (no prior, no variational inference)
bias_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
name: Python `str`, the name of the layer. Layers with the same name will
share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in
such cases.
reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous
layer by the same name.
Returns:
output: `Tensor` representing a the affine transformed input under a random
draw from the surrogate posterior distribution.
#### Examples
We illustrate a Bayesian neural network with [variational inference](
https://en.wikipedia.org/wiki/Variational_Bayesian_methods),
assuming a dataset of `features` and `labels`.
```python
tfp = tf.contrib.bayesflow
net = tfp.layers.dense_reparameterization(
features, 512, activation=tf.nn.relu)
logits = tfp.layers.dense_reparameterization(net, 10)
neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
loss = neg_log_likelihood + kl
train_op = tf.train.AdamOptimizer().minimize(loss)
```
It uses reparameterization gradients to minimize the
Kullback-Leibler divergence up to a constant, also known as the
negative Evidence Lower Bound. It consists of the sum of two terms:
the expected negative log-likelihood, which we approximate via
Monte Carlo; and the KL divergence, which is added via regularizer
terms which are arguments to the layer.
"""
layer = DenseReparameterization(
units,
activation=activation,
activity_regularizer=activity_regularizer,
trainable=trainable,
kernel_posterior_fn=kernel_posterior_fn,
kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
kernel_prior_fn=kernel_prior_fn,
kernel_divergence_fn=kernel_divergence_fn,
bias_posterior_fn=bias_posterior_fn,
bias_posterior_tensor_fn=bias_posterior_tensor_fn,
bias_prior_fn=bias_prior_fn,
bias_divergence_fn=bias_divergence_fn,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
class DenseLocalReparameterization(_DenseVariational):
"""Densely-connected layer class with local reparameterization estimator.
This layer implements the Bayesian variational inference analogue to
a dense layer by assuming the `kernel` and/or the `bias` are drawn
from distributions. By default, the layer implements a stochastic
forward pass via sampling from the kernel and bias posteriors,
```none
kernel, bias ~ posterior
outputs = activation(matmul(inputs, kernel) + bias)
```
The arguments permit separate specification of the surrogate posterior
(`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`
distributions.
Args:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (`callable`). Set it to None to maintain a
linear activation.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
kernel_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `kernel` parameter. Default value:
`default_mean_field_normal_fn()`.
kernel_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
kernel_prior_fn: Python `callable` which creates `tf.distributions`
instance. See `default_mean_field_normal_fn` docstring for required
parameter signature.
Default value: `tf.distributions.Normal(loc=0., scale=1.)`.
kernel_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
bias_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `bias` parameter. Default value:
`default_mean_field_normal_fn(is_singular=True)` (which creates an
instance of `tf.distributions.Deterministic`).
bias_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
bias_prior_fn: Python `callable` which creates `tf.distributions` instance.
See `default_mean_field_normal_fn` docstring for required parameter
signature. Default value: `None` (no prior, no variational inference)
bias_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
name: Python `str`, the name of the layer. Layers with the same name will
share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in
such cases.
reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous
layer by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (`callable`).
activity_regularizer: Regularizer function for the output.
kernel_posterior_fn: `callable` returning posterior.
kernel_posterior_tensor_fn: `callable` operating on posterior.
kernel_prior_fn: `callable` returning prior.
kernel_divergence_fn: `callable` returning divergence.
bias_posterior_fn: `callable` returning posterior.
bias_posterior_tensor_fn: `callable` operating on posterior.
bias_prior_fn: `callable` returning prior.
bias_divergence_fn: `callable` returning divergence.
#### Examples
We illustrate a Bayesian neural network with [variational inference](
https://en.wikipedia.org/wiki/Variational_Bayesian_methods),
assuming a dataset of `features` and `labels`.
```python
tfp = tf.contrib.bayesflow
net = tfp.layers.DenseLocalReparameterization(
512, activation=tf.nn.relu)(features)
logits = tfp.layers.DenseLocalReparameterization(10)(net)
neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
loss = neg_log_likelihood + kl
train_op = tf.train.AdamOptimizer().minimize(loss)
```
It uses local reparameterization gradients to minimize the
Kullback-Leibler divergence up to a constant, also known as the
negative Evidence Lower Bound. It consists of the sum of two terms:
the expected negative log-likelihood, which we approximate via
Monte Carlo; and the KL divergence, which is added via regularizer
terms which are arguments to the layer.
"""
def __init__(
self,
units,
activation=None,
activity_regularizer=None,
trainable=True,
kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
kernel_posterior_tensor_fn=lambda d: d.sample(),
kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda
loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
bias_posterior_fn=layers_util.default_mean_field_normal_fn(
is_singular=True),
bias_posterior_tensor_fn=lambda d: d.sample(),
bias_prior_fn=None,
bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
name=None,
**kwargs):
super(DenseLocalReparameterization, self).__init__(
units=units,
activation=activation,
activity_regularizer=activity_regularizer,
trainable=trainable,
kernel_posterior_fn=kernel_posterior_fn,
kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
kernel_prior_fn=kernel_prior_fn,
kernel_divergence_fn=kernel_divergence_fn,
bias_posterior_fn=bias_posterior_fn,
bias_posterior_tensor_fn=bias_posterior_tensor_fn,
bias_prior_fn=bias_prior_fn,
bias_divergence_fn=bias_divergence_fn,
name=name,
**kwargs)
def _apply_variational_kernel(self, inputs):
if (not isinstance(self.kernel_posterior, independent_lib.Independent) or
not isinstance(self.kernel_posterior.distribution, normal_lib.Normal)):
raise TypeError(
"`DenseLocalReparameterization` requires "
"`kernel_posterior_fn` produce an instance of "
"`tf.distributions.Independent(tf.distributions.Normal)` "
"(saw: \"{}\").".format(type(self.kernel_posterior).__name__))
self.kernel_posterior_affine = normal_lib.Normal(
loc=self._matmul(inputs, self.kernel_posterior.distribution.loc),
scale=standard_ops.sqrt(self._matmul(
standard_ops.square(inputs),
standard_ops.square(self.kernel_posterior.distribution.scale))))
self.kernel_posterior_affine_tensor = (
self.kernel_posterior_tensor_fn(self.kernel_posterior_affine))
self.kernel_posterior_tensor = None
return self.kernel_posterior_affine_tensor
def dense_local_reparameterization(
inputs,
units,
activation=None,
activity_regularizer=None,
trainable=True,
kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
kernel_posterior_tensor_fn=lambda d: d.sample(),
kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda
loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
bias_posterior_fn=layers_util.default_mean_field_normal_fn(
is_singular=True),
bias_posterior_tensor_fn=lambda d: d.sample(),
bias_prior_fn=None,
bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
name=None,
reuse=None):
"""Densely-connected layer with local reparameterization estimator.
This layer implements the Bayesian variational inference analogue to
a dense layer by assuming the `kernel` and/or the `bias` are drawn
from distributions. By default, the layer implements a stochastic
forward pass via sampling from the kernel and bias posteriors,
```none
kernel, bias ~ posterior
outputs = activation(matmul(inputs, kernel) + bias)
```
The arguments permit separate specification of the surrogate posterior
(`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`
distributions.
Args:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (`callable`). Set it to None to maintain a
linear activation.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
kernel_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `kernel` parameter. Default value:
`default_mean_field_normal_fn()`.
kernel_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
kernel_prior_fn: Python `callable` which creates `tf.distributions`
instance. See `default_mean_field_normal_fn` docstring for required
parameter signature.
Default value: `tf.distributions.Normal(loc=0., scale=1.)`.
kernel_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
bias_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `bias` parameter. Default value:
`default_mean_field_normal_fn(is_singular=True)` (which creates an
instance of `tf.distributions.Deterministic`).
bias_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
bias_prior_fn: Python `callable` which creates `tf.distributions` instance.
See `default_mean_field_normal_fn` docstring for required parameter
signature. Default value: `None` (no prior, no variational inference)
bias_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
name: Python `str`, the name of the layer. Layers with the same name will
share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in
such cases.
reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous
layer by the same name.
Returns:
output: `Tensor` representing a the affine transformed input under a random
draw from the surrogate posterior distribution.
#### Examples
We illustrate a Bayesian neural network with [variational inference](
https://en.wikipedia.org/wiki/Variational_Bayesian_methods),
assuming a dataset of `features` and `labels`.
```python
tfp = tf.contrib.bayesflow
net = tfp.layers.dense_local_reparameterization(
features, 512, activation=tf.nn.relu)
logits = tfp.layers.dense_local_reparameterization(net, 10)
neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
loss = neg_log_likelihood + kl
train_op = tf.train.AdamOptimizer().minimize(loss)
```
It uses local reparameterization gradients to minimize the
Kullback-Leibler divergence up to a constant, also known as the
negative Evidence Lower Bound. It consists of the sum of two terms:
the expected negative log-likelihood, which we approximate via
Monte Carlo; and the KL divergence, which is added via regularizer
terms which are arguments to the layer.
"""
layer = DenseLocalReparameterization(
units,
activation=activation,
activity_regularizer=activity_regularizer,
trainable=trainable,
kernel_posterior_fn=kernel_posterior_fn,
kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
kernel_prior_fn=kernel_prior_fn,
kernel_divergence_fn=kernel_divergence_fn,
bias_posterior_fn=bias_posterior_fn,
bias_posterior_tensor_fn=bias_posterior_tensor_fn,
bias_prior_fn=bias_prior_fn,
bias_divergence_fn=bias_divergence_fn,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
class DenseFlipout(_DenseVariational):
"""Densely-connected layer class with Flipout estimator.
This layer implements the Bayesian variational inference analogue to
a dense layer by assuming the `kernel` and/or the `bias` are drawn
from distributions. By default, the layer implements a stochastic
forward pass via sampling from the kernel and bias posteriors,
```none
kernel, bias ~ posterior
outputs = activation(matmul(inputs, kernel) + bias)
```
The arguments permit separate specification of the surrogate posterior
(`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`
distributions.
Args:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (`callable`). Set it to None to maintain a
linear activation.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
kernel_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `kernel` parameter. Default value:
`default_mean_field_normal_fn()`.
kernel_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
kernel_prior_fn: Python `callable` which creates `tf.distributions`
instance. See `default_mean_field_normal_fn` docstring for required
parameter signature.
Default value: `tf.distributions.Normal(loc=0., scale=1.)`.
kernel_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
bias_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `bias` parameter. Default value:
`default_mean_field_normal_fn(is_singular=True)` (which creates an
instance of `tf.distributions.Deterministic`).
bias_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
bias_prior_fn: Python `callable` which creates `tf.distributions` instance.
See `default_mean_field_normal_fn` docstring for required parameter
signature. Default value: `None` (no prior, no variational inference)
bias_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
seed: Python scalar `int` which initializes the random number
generator. Default value: `None` (i.e., use global seed).
name: Python `str`, the name of the layer. Layers with the same name will
share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in
such cases.
reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous
layer by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (`callable`).
activity_regularizer: Regularizer function for the output.
kernel_posterior_fn: `callable` returning posterior.
kernel_posterior_tensor_fn: `callable` operating on posterior.
kernel_prior_fn: `callable` returning prior.
kernel_divergence_fn: `callable` returning divergence.
bias_posterior_fn: `callable` returning posterior.
bias_posterior_tensor_fn: `callable` operating on posterior.
bias_prior_fn: `callable` returning prior.
bias_divergence_fn: `callable` returning divergence.
seed: Python integer, used to create random seeds.
#### Examples
We illustrate a Bayesian neural network with [variational inference](
https://en.wikipedia.org/wiki/Variational_Bayesian_methods),
assuming a dataset of `features` and `labels`.
```python
tfp = tf.contrib.bayesflow
net = tfp.layers.DenseFlipout(
512, activation=tf.nn.relu)(features)
logits = tfp.layers.DenseFlipout(10)(net)
neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
loss = neg_log_likelihood + kl
train_op = tf.train.AdamOptimizer().minimize(loss)
```
It uses the Flipout gradient estimator to minimize the
Kullback-Leibler divergence up to a constant, also known as the
negative Evidence Lower Bound. It consists of the sum of two terms:
the expected negative log-likelihood, which we approximate via
Monte Carlo; and the KL divergence, which is added via regularizer
terms which are arguments to the layer.
"""
def __init__(
self,
units,
activation=None,
activity_regularizer=None,
trainable=True,
kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
kernel_posterior_tensor_fn=lambda d: d.sample(),
kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda
loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
bias_posterior_fn=layers_util.default_mean_field_normal_fn(
is_singular=True),
bias_posterior_tensor_fn=lambda d: d.sample(),
bias_prior_fn=None,
bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
seed=None,
name=None,
**kwargs):
super(DenseFlipout, self).__init__(
units=units,
activation=activation,
activity_regularizer=activity_regularizer,
trainable=trainable,
kernel_posterior_fn=kernel_posterior_fn,
kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
kernel_prior_fn=kernel_prior_fn,
kernel_divergence_fn=kernel_divergence_fn,
bias_posterior_fn=bias_posterior_fn,
bias_posterior_tensor_fn=bias_posterior_tensor_fn,
bias_prior_fn=bias_prior_fn,
bias_divergence_fn=bias_divergence_fn,
name=name,
**kwargs)
self.seed = seed
def _apply_variational_kernel(self, inputs):
if (not isinstance(self.kernel_posterior, independent_lib.Independent) or
not isinstance(self.kernel_posterior.distribution, normal_lib.Normal)):
raise TypeError(
"`DenseFlipout` requires "
"`kernel_posterior_fn` produce an instance of "
"`tf.distributions.Independent(tf.distributions.Normal)` "
"(saw: \"{}\").".format(type(self.kernel_posterior).__name__))
self.kernel_posterior_affine = normal_lib.Normal(
loc=array_ops.zeros_like(self.kernel_posterior.distribution.loc),
scale=self.kernel_posterior.distribution.scale)
self.kernel_posterior_affine_tensor = (
self.kernel_posterior_tensor_fn(self.kernel_posterior_affine))
self.kernel_posterior_tensor = None
input_shape = array_ops.shape(inputs)
batch_shape = input_shape[:-1]
sign_input = random_sign(input_shape, dtype=inputs.dtype, seed=self.seed)
sign_output = random_sign(
array_ops.concat([batch_shape,
array_ops.expand_dims(self.units, 0)], 0),
dtype=inputs.dtype,
seed=distribution_util.gen_new_seed(
self.seed, salt="dense_flipout"))
perturbed_inputs = self._matmul(
inputs * sign_input, self.kernel_posterior_affine_tensor) * sign_output
outputs = self._matmul(inputs, self.kernel_posterior.distribution.loc)
outputs += perturbed_inputs
return outputs
def dense_flipout(
inputs,
units,
activation=None,
activity_regularizer=None,
trainable=True,
kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
kernel_posterior_tensor_fn=lambda d: d.sample(),
kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda
loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
bias_posterior_fn=layers_util.default_mean_field_normal_fn(
is_singular=True),
bias_posterior_tensor_fn=lambda d: d.sample(),
bias_prior_fn=None,
bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
seed=None,
name=None,
reuse=None):
"""Densely-connected layer with Flipout estimator.
This layer implements the Bayesian variational inference analogue to
a dense layer by assuming the `kernel` and/or the `bias` are drawn
from distributions. By default, the layer implements a stochastic
forward pass via sampling from the kernel and bias posteriors,
```none
kernel, bias ~ posterior
outputs = activation(matmul(inputs, kernel) + bias)
```
The arguments permit separate specification of the surrogate posterior
(`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`
distributions.
Args:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (`callable`). Set it to None to maintain a
linear activation.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
kernel_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `kernel` parameter. Default value:
`default_mean_field_normal_fn()`.
kernel_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
kernel_prior_fn: Python `callable` which creates `tf.distributions`
instance. See `default_mean_field_normal_fn` docstring for required
parameter signature.
Default value: `tf.distributions.Normal(loc=0., scale=1.)`.
kernel_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
bias_posterior_fn: Python `callable` which creates
`tf.distributions.Distribution` instance representing the surrogate
posterior of the `bias` parameter. Default value:
`default_mean_field_normal_fn(is_singular=True)` (which creates an
instance of `tf.distributions.Deterministic`).
bias_posterior_tensor_fn: Python `callable` which takes a
`tf.distributions.Distribution` instance and returns a representative
value. Default value: `lambda d: d.sample()`.
bias_prior_fn: Python `callable` which creates `tf.distributions` instance.
See `default_mean_field_normal_fn` docstring for required parameter
signature. Default value: `None` (no prior, no variational inference)
bias_divergence_fn: Python `callable` which takes the surrogate posterior
distribution, prior distribution and random variate sample(s) from the
surrogate posterior and computes or approximates the KL divergence. The
distributions are `tf.distributions.Distribution`-like instances and the
sample is a `Tensor`.
seed: Python scalar `int` which initializes the random number
generator. Default value: `None` (i.e., use global seed).
name: Python `str`, the name of the layer. Layers with the same name will
share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in
such cases.
reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous
layer by the same name.
Returns:
output: `Tensor` representing a the affine transformed input under a random
draw from the surrogate posterior distribution.
#### Examples
We illustrate a Bayesian neural network with [variational inference](
https://en.wikipedia.org/wiki/Variational_Bayesian_methods),
assuming a dataset of `features` and `labels`.
```python
tfp = tf.contrib.bayesflow
net = tfp.layers.dense_flipout(
features, 512, activation=tf.nn.relu)
logits = tfp.layers.dense_flipout(net, 10)
neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
loss = neg_log_likelihood + kl
train_op = tf.train.AdamOptimizer().minimize(loss)
```
It uses the Flipout gradient estimator to minimize the
Kullback-Leibler divergence up to a constant, also known as the
negative Evidence Lower Bound. It consists of the sum of two terms:
the expected negative log-likelihood, which we approximate via
Monte Carlo; and the KL divergence, which is added via regularizer
terms which are arguments to the layer.
"""
layer = DenseFlipout(
units,
activation=activation,
activity_regularizer=activity_regularizer,
trainable=trainable,
kernel_posterior_fn=kernel_posterior_fn,
kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
kernel_prior_fn=kernel_prior_fn,
kernel_divergence_fn=kernel_divergence_fn,
bias_posterior_fn=bias_posterior_fn,
bias_posterior_tensor_fn=bias_posterior_tensor_fn,
bias_prior_fn=bias_prior_fn,
bias_divergence_fn=bias_divergence_fn,
seed=seed,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
def random_sign(shape, dtype=dtypes.float32, seed=None):
"""Draw values from {-1, 1} uniformly, i.e., Rademacher distribution."""
random_bernoulli = random_ops.random_uniform(shape, minval=0, maxval=2,
dtype=dtypes.int32,
seed=seed)
return math_ops.cast(2 * random_bernoulli - 1, dtype)
| 45.734089
| 116
| 0.737251
| 6,730
| 52,457
| 5.54948
| 0.054829
| 0.042171
| 0.030952
| 0.031488
| 0.901253
| 0.880342
| 0.858145
| 0.849256
| 0.838867
| 0.831852
| 0
| 0.002104
| 0.184646
| 52,457
| 1,146
| 117
| 45.773997
| 0.871104
| 0.628972
| 0
| 0.595745
| 0
| 0
| 0.035899
| 0.015754
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040189
| false
| 0
| 0.040189
| 0
| 0.122931
| 0.002364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a75472f4b186ce4433399e3535634ff879b1b81c
| 58
|
py
|
Python
|
dsb2015/models/__init__.py
|
rmunoz12/dsb2015
|
5f277658ecd49a1fd751c897367715811fa81668
|
[
"MIT"
] | null | null | null |
dsb2015/models/__init__.py
|
rmunoz12/dsb2015
|
5f277658ecd49a1fd751c897367715811fa81668
|
[
"MIT"
] | null | null | null |
dsb2015/models/__init__.py
|
rmunoz12/dsb2015
|
5f277658ecd49a1fd751c897367715811fa81668
|
[
"MIT"
] | null | null | null |
from .alexnet import get_alexnet
from .vgg import get_vgg
| 19.333333
| 32
| 0.827586
| 10
| 58
| 4.6
| 0.5
| 0.391304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 2
| 33
| 29
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a773118785193ba98c5f10f1dd015dc36d732948
| 169
|
py
|
Python
|
ProgressReport2/Testing.py
|
TainedeBlaze/EEE3097S-Project
|
93772557d9a795cb8411657d08d9a44f640ba1ee
|
[
"MIT"
] | null | null | null |
ProgressReport2/Testing.py
|
TainedeBlaze/EEE3097S-Project
|
93772557d9a795cb8411657d08d9a44f640ba1ee
|
[
"MIT"
] | null | null | null |
ProgressReport2/Testing.py
|
TainedeBlaze/EEE3097S-Project
|
93772557d9a795cb8411657d08d9a44f640ba1ee
|
[
"MIT"
] | null | null | null |
import glob
import os
#os.system('python3 SendData.py ' + "IMU-data-2021-10-25-20:38:19.csv" )
os.system('python3 RecieveData.py' + " encrypted_fileofIMU.txt")
| 28.166667
| 73
| 0.692308
| 26
| 169
| 4.461538
| 0.769231
| 0.137931
| 0.258621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110345
| 0.142012
| 169
| 6
| 74
| 28.166667
| 0.689655
| 0.420118
| 0
| 0
| 0
| 0
| 0.494624
| 0.247312
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a7a600f9a55e01c7109c32fa9272219902c3de44
| 113
|
py
|
Python
|
tests/basetest.py
|
NerdPraise/UKUFU-TASK
|
3f3018a1d50706fa23fdacd13f644669c10406f0
|
[
"MIT"
] | null | null | null |
tests/basetest.py
|
NerdPraise/UKUFU-TASK
|
3f3018a1d50706fa23fdacd13f644669c10406f0
|
[
"MIT"
] | null | null | null |
tests/basetest.py
|
NerdPraise/UKUFU-TASK
|
3f3018a1d50706fa23fdacd13f644669c10406f0
|
[
"MIT"
] | null | null | null |
from rest_framework.test import APITestCase
# Create your tests here.
class BaseTestCase(APITestCase):
pass
| 18.833333
| 43
| 0.79646
| 14
| 113
| 6.357143
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150442
| 113
| 5
| 44
| 22.6
| 0.927083
| 0.20354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ac2c63dca724299b8cea31fb72410cb5561b6f1e
| 194
|
py
|
Python
|
day-11/package_demo/package1/pathprint.py
|
JohnLockwood/100-days-of-python
|
352b3b0861e1e1228b54079e39c1d0a83ef9af6c
|
[
"Apache-2.0"
] | null | null | null |
day-11/package_demo/package1/pathprint.py
|
JohnLockwood/100-days-of-python
|
352b3b0861e1e1228b54079e39c1d0a83ef9af6c
|
[
"Apache-2.0"
] | null | null | null |
day-11/package_demo/package1/pathprint.py
|
JohnLockwood/100-days-of-python
|
352b3b0861e1e1228b54079e39c1d0a83ef9af6c
|
[
"Apache-2.0"
] | null | null | null |
"""Demo module to show python search path"""
import sys
import pprint
def print_path():
"""Pretty print the system path"""
print("Printing the search path:")
pprint.pprint(sys.path)
| 24.25
| 44
| 0.695876
| 28
| 194
| 4.785714
| 0.571429
| 0.149254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180412
| 194
| 8
| 45
| 24.25
| 0.842767
| 0.345361
| 0
| 0
| 0
| 0
| 0.213675
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0.8
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
3bc2aa086fed44b5e36a5921fabb9eeb08aa65ae
| 81
|
py
|
Python
|
mitreattack/__init__.py
|
wetkind/mitreattack-python
|
f2406cac6b8d104d280712fccf9c50637ae05fbb
|
[
"Apache-2.0"
] | 137
|
2021-04-06T17:40:20.000Z
|
2022-03-30T18:27:44.000Z
|
mitreattack/__init__.py
|
wetkind/mitreattack-python
|
f2406cac6b8d104d280712fccf9c50637ae05fbb
|
[
"Apache-2.0"
] | 33
|
2021-04-07T13:41:39.000Z
|
2022-03-25T14:37:40.000Z
|
mitreattack/__init__.py
|
wetkind/mitreattack-python
|
f2406cac6b8d104d280712fccf9c50637ae05fbb
|
[
"Apache-2.0"
] | 29
|
2021-04-06T21:14:40.000Z
|
2022-03-31T15:26:27.000Z
|
from .attackToExcel import *
from .navlayers import *
from .collections import *
| 20.25
| 28
| 0.777778
| 9
| 81
| 7
| 0.555556
| 0.31746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 81
| 3
| 29
| 27
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ce3f6d2c4d9cecf42f273ac3ac1ebe1a697d9983
| 167
|
py
|
Python
|
xgp/tests/test_regressor.py
|
MaxHalford/xgp-python
|
f93059f46dedd8712578a2bd0f45e5f9f18a2c63
|
[
"BSD-3-Clause"
] | 7
|
2018-05-24T07:57:56.000Z
|
2021-11-16T17:34:26.000Z
|
xgp/tests/test_regressor.py
|
MaxHalford/xgp-python
|
f93059f46dedd8712578a2bd0f45e5f9f18a2c63
|
[
"BSD-3-Clause"
] | 2
|
2018-06-01T17:02:35.000Z
|
2020-03-15T07:09:53.000Z
|
xgp/tests/test_regressor.py
|
MaxHalford/xgp-python
|
f93059f46dedd8712578a2bd0f45e5f9f18a2c63
|
[
"BSD-3-Clause"
] | 1
|
2021-11-16T17:34:27.000Z
|
2021-11-16T17:34:27.000Z
|
from sklearn.utils.estimator_checks import check_estimator
import xgp
def test_regressor_check_estimator():
return
return check_estimator(xgp.XGPRegressor)
| 18.555556
| 58
| 0.820359
| 21
| 167
| 6.238095
| 0.619048
| 0.320611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131737
| 167
| 8
| 59
| 20.875
| 0.903448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ce445002912c2b7e95efb447384b2ade20749c71
| 38,400
|
py
|
Python
|
textadapter/tests/test_TextAdapter.py
|
ContinuumIO/TextAdapter
|
53138c2277cdfcf32e127251313d4f77f81050aa
|
[
"BSD-3-Clause"
] | 22
|
2016-11-09T12:20:04.000Z
|
2021-02-07T03:07:58.000Z
|
textadapter/tests/test_TextAdapter.py
|
blaze/TextAdapter
|
53138c2277cdfcf32e127251313d4f77f81050aa
|
[
"BSD-3-Clause"
] | 3
|
2016-11-01T03:43:03.000Z
|
2017-02-27T20:19:05.000Z
|
textadapter/tests/test_TextAdapter.py
|
ContinuumIO/TextAdapter
|
53138c2277cdfcf32e127251313d4f77f81050aa
|
[
"BSD-3-Clause"
] | 9
|
2017-01-31T21:28:57.000Z
|
2021-12-15T04:22:53.000Z
|
#!/usr/bin/python
import sys
import textadapter
import unittest
from .generate import (generate_dataset, IntIter,
MissingValuesIter, FixedWidthIter)
import numpy as np
from numpy.testing import assert_array_equal
import gzip
import os
import io
from six import StringIO
class TestTextAdapter(unittest.TestCase):
num_records = 100000
def assert_equality(self, left, right):
try:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
self.assert_array_equal(left, right)
else:
self.assertTrue(left == right)
except AssertionError:
raise AssertionError('FAIL: {0} != {1}'.format(left, right))
# Basic parsing tests
def test_string_parsing(self):
data = StringIO('1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([('1', '2', '3')], dtype='S5,S5,S5'))
data = io.StringIO(u'1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([('1', '2', '3')], dtype='S5,S5,S5'))
data = io.BytesIO(b'1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([('1', '2', '3')], dtype='S5,S5,S5'))
# basic utf_8 tests
def test_utf8_parsing(self):
# test single byte character
data = io.BytesIO(u'1,2,\u0033'.encode('utf_8'))
adapter = textadapter.text_adapter(data, field_names=False)
expected = np.array([('1', '2', '3')], dtype='u8,u8,u8')
assert_array_equal(adapter[:], expected)
# test multibyte character
data = io.BytesIO(u'1,2,\u2092'.encode('utf_8'))
adapter = textadapter.text_adapter(data, field_names=False)
expected = np.array([('1', '2', u'\u2092')], dtype='u8,u8,O')
assert_array_equal(adapter[:], expected)
def test_no_whitespace_stripping(self):
data = StringIO('1 ,2 ,3 \n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('1 ', '2 ', '3 ')], dtype='S3,S3,S3'))
data = StringIO(' 1, 2, 3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([(' 1', ' 2', ' 3')], dtype='S3,S3,S3'))
data = StringIO(' 1 , 2 , 3 \n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([(' 1 ', ' 2 ', ' 3 ')], dtype='S5,S5,S5'))
data = StringIO('\t1\t,\t2\t,\t3\t\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('\t1\t', '\t2\t', '\t3\t')], dtype='S3,S3,S3'))
def test_quoted_whitespace(self):
data = StringIO('"1 ","2 ","3 "\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('1 ', '2 ', '3 ')], dtype='S3,S3,S3'))
data = StringIO('"\t1\t"\t"\t2\t"\t"\t3\t"\n')
adapter = textadapter.text_adapter(data, field_names=False, delimiter='\t')
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('\t1\t', '\t2\t', '\t3\t')], dtype='S3,S3,S3'))
def test_fixed_simple(self):
# TODO: fix this test on 32-bit and on Windows
if tuple.__itemsize__ == 4:
# This test does not work on 32-bit, so we skip it
return
if sys.platform == 'win32':
# This test does not work on Windows
return
data = StringIO(" 1 2 3\n 4 5 67\n890123 4")
adapter = textadapter.FixedWidthTextAdapter(data, 3, infer_types=False, field_names=False)
adapter.set_field_types({0:'i', 1:'i', 2:'i'})
control = np.array([(1, 2, 3), (4, 5, 67), (890, 123, 4)], dtype='i,i,i')
assert_array_equal(adapter[:], control)
def test_spaces_around_numeric_values(self):
data = StringIO(' 1 , -2 , 3.3 , -4.4 \n 5 , -6 , 7.7 , -8.8 ')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'u4', 1:'i8', 2:'f4', 3:'f8'})
array = adapter[:]
control = np.array([(1,-2,3.3,-4.4), (5,-6,7.7,-8.8)], dtype='u4,i8,f4,f8')
assert_array_equal(array, control)
def test_slicing(self):
data = StringIO()
generate_dataset(data, IntIter(), ',', self.num_records)
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
assert_array_equal(adapter[0], np.array([(0, 1, 2, 3, 4)], dtype='u4,u4,u4,u4,u4'))
expected_values = [((self.num_records-1)*5)+x for x in range(5)]
self.assert_equality(adapter[self.num_records-1].item(), tuple(expected_values))
#adapter.create_index()
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
self.assert_equality(adapter['f0'][0].item(), (0,))
self.assert_equality(adapter['f4'][1].item(), (9,))
#self.assert_equality(adapter[self.num_records-1]['f4'], (self.num_records*5)-1)
array = adapter[:]
record = [x for x in range(0, 5)]
self.assert_equality(array.size, self.num_records)
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
array = adapter[:-1]
record = [x for x in range(0, 5)]
self.assert_equality(array.size, self.num_records-1)
for i in range(0, self.num_records-1):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
array = adapter[0:10]
self.assert_equality(array.size, 10)
record = [x for x in range(0, 5)]
for i in range(0, 10):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
array = adapter[1:]
self.assert_equality(array.size, self.num_records-1)
record = [x for x in range(5, 10)]
for i in range(0, self.num_records-1):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
array = adapter[0:10:2]
self.assert_equality(array.size, 5)
record = [x for x in range(0, 5)]
for i in range(0, 5):
self.assert_equality(array[i].item(), tuple(record))
record = [x+10 for x in record]
array = adapter[['f0', 'f4']][:]
record = [0, 4]
self.assert_equality(array.size, self.num_records)
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
adapter.field_filter = [0, 'f4']
array = adapter[:]
record = [0, 4]
self.assert_equality(array.size, self.num_records)
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
adapter.field_filter = None
array = adapter[:]
record = [0, 1, 2, 3, 4]
self.assert_equality(array.size, self.num_records)
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
try:
adapter[self.num_records]
except textadapter.AdapterIndexError:
pass
else:
self.fail('AdaperIndexError not thrown')
try:
adapter[0:self.num_records+1]
except textadapter.AdapterIndexError:
pass
else:
self.fail('AdaperIndexError not thrown')
def test_converters(self):
data = StringIO()
generate_dataset(data, IntIter(), ',', self.num_records)
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False)
#adapter.set_field_types({0:'u4', 1:'u4', 2:'u4', 3:'u4', 4:'u4'})
def increment(input_str):
return int(input_str) + 1
def double(input_str):
return int(input_str) + int(input_str)
if sys.platform == 'win32' and tuple.__itemsize__ == 8:
# TODO: there problems below here 64-bit Windows, I get
# OverflowError: can't convert negative value to unigned PY_LONG_LONG
return
adapter.set_converter(0, increment)
adapter.set_converter('f1', double)
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [1, 2, 2, 3, 4]
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record[0] += 5
record[1] = (10 * (i+1)) + 2
record[2] += 5
record[3] += 5
record[4] += 5
def test_missing_fill_values(self):
data = StringIO()
generate_dataset(data, MissingValuesIter(), ',', self.num_records)
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False)
adapter.set_field_types({'f0':'u4', 1:'u4', 2:'u4', 3:'u4', 'f4':'u4'})
adapter.set_missing_values({0:['NA', 'NaN'], 'f4':['xx','inf']})
adapter.set_fill_values({0:99, 4:999})
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [x for x in range(0, 5)]
for i in range(0, self.num_records):
if i % 4 == 0 or i % 4 == 1:
record[0] = 99
record[4] = 999
else:
record[0] = record[1] - 1
record[4] = record[3] + 1
self.assert_equality(array[i].item(), tuple(record))
record[1] += 5
record[2] += 5
record[3] += 5
data.seek(0)
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=True)
adapter.set_missing_values({0:['NA', 'NaN'], 4:['xx','inf']})
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [x for x in range(0, 5)]
for i in range(0, self.num_records):
if i % 4 == 0 or i % 4 == 1:
record[0] = 0
record[4] = 0
else:
record[0] = record[1] - 1
record[4] = record[3] + 1
self.assert_equality(array[i].item(), tuple(record))
record[1] += 5
record[2] += 5
record[3] += 5
# Test missing field
data = StringIO('1,2,3\n4,5\n7,8,9')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.field_types = {0:'O', 1:'O', 2:'O'}
adapter.set_fill_values({0:np.nan, 1:np.nan, 2:np.nan})
array = adapter[:]
# NumPy assert_array_equal no longer supports mixed O/nan types
expected = [('1','2','3'),('4','5',np.nan),('7','8','9')]
self.assert_equality(array.tolist(), expected)
def test_fixed_width(self):
data = StringIO()
generate_dataset(data, FixedWidthIter(), '', self.num_records)
adapter = textadapter.FixedWidthTextAdapter(data, [2,3,4,5,6], field_names=False, infer_types=False)
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [0, 0, 0, 0, 0]
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+1 for x in record]
if record[0] == 100:
record[0] = 0
if record[1] == 1000:
record[1] = 0
if record[2] == 10000:
record[2] = 0
if record[3] == 100000:
record[3] = 0
if record[4] == 1000000:
record[4] = 0
# Test skipping blank lines
data = StringIO(' 1 2 3\n\n 4 5 6')
adapter = textadapter.text_adapter(data, parser='fixed_width',
field_widths=[2,2,2], field_names=False)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
# Test comment lines
data = StringIO('# 1 2 3\n 1 2 3\n# foo\n 4 5 6')
adapter = textadapter.text_adapter(data, parser='fixed_width',
field_widths=[2,2,2], field_names=False)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
# Test field names line
data = StringIO(' a b c\n 1 2 3')
adapter = textadapter.text_adapter(data, parser='fixed_width',
field_widths=[2,2,2], field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3)],
dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
# Test field names line as comment line
data = StringIO('# a b c\n 1 2 3')
adapter = textadapter.text_adapter(data, parser='fixed_width',
field_widths=[2,2,2], field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3)],
dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
# Test incomplete field names line
data = StringIO(' a\n 1 2 3')
adapter = textadapter.text_adapter(data, parser='fixed_width',
field_widths=[2,2,2], field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3)],
dtype=[('a','<u8'),('f1','<u8'),('f2','<u8')]))
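# Fixed-width sketch: field_widths gives each field's width in characters,
# and blank lines, '#' comment lines, and an optional field names line
# behave as in the csv parser, e.g.:
#   textadapter.text_adapter(data, parser='fixed_width',
#                            field_widths=[2,2,2], field_names=False)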
def test_regex(self):
data = StringIO()
generate_dataset(data, IntIter(), ',', self.num_records)
adapter = textadapter.RegexTextAdapter(data, '([0-9]*),([0-9]*),([0-9]*),([0-9]*),([0-9]*)\n', field_names=False, infer_types=False)
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [x for x in range(0, 5)]
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record = [x+5 for x in record]
# Test skipping blank lines
data = StringIO('1 2 3\n\n4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9]) ([0-9]) ([0-9])', field_names=False)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
# Test comment lines
data = StringIO('#1 2 3\n1 2 3\n# foo\n4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9]) ([0-9]) ([0-9])', field_names=False)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
# Test field names line
data = StringIO('a b c\n4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9]) ([0-9]) ([0-9])', field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(4,5,6)],
dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
# Test field names line as comment line
data = StringIO('#a b c\n4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9]) ([0-9]) ([0-9])', field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(4,5,6)],
dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
# Test incomplete field names line
data = StringIO('a b\n4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9]) ([0-9]) ([0-9])', field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([(4,5,6)],
dtype=[('a','<u8'),('b','<u8'),('f2','<u8')]))
# Test field names line that doesn't match regex
data = StringIO('a b c\n1 2 3 4 5 6')
adapter = textadapter.text_adapter(data, parser='regex',
regex_string='([0-9\s]+) ([0-9\s]+) ([0-9\s]+)', field_names=True)
array = adapter[:]
assert_array_equal(array, np.array([('1 2', '3 4', '5 6')],
dtype=[('a','O'),('b','O'),('c','O')]))
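# Regex parser sketch: each capturing group in regex_string becomes one
# output field, and lines that do not match the pattern (like the header
# line in the last case above) are skipped rather than raising an error.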
def test_index(self):
if sys.platform == 'win32':
# TODO: this test fails on Windows because of file lock problems
return
num_records = 100000
expected_values = [((num_records-1)*5) + x for x in range(5)]
data = StringIO()
generate_dataset(data, IntIter(), ',', num_records)
# test explicit index building
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False)
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
adapter.create_index()
self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
# test implicitly creating disk index on the fly
if os.path.exists('test.idx'):
os.remove('test.idx')
data.seek(0)
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False, index_name='test.idx')
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
adapter.to_array()
self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
adapter.close()
# test loading disk index
data.seek(0)
adapter2 = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False, index_name='test.idx')
adapter2.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
self.assert_equality(adapter2[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter2[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter2[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter2[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter2[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter2[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter2[-1].item(), tuple(expected_values))
adapter.close()
os.remove('test.idx')
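# Indexing has two paths covered above: create_index() builds the record
# offset index eagerly in memory, while index_name='test.idx' writes a disk
# index as a side effect of reading and reloads it on the next open;
# presumably both make adapter[i] a seek instead of a rescan from the start.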
def test_gzip_index(self):
num_records = 1000000
data = StringIO()
generate_dataset(data, IntIter(), ',', num_records)
dataz = io.BytesIO()
gzip_output = gzip.GzipFile(fileobj=dataz, mode='wb')
gzip_output.write(data.getvalue().encode('utf8'))
gzip_output.close()
dataz.seek(0)
# test explicit index building
adapter = textadapter.text_adapter(dataz, compression='gzip', delimiter=',', field_names=False, infer_types=False)
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
adapter.create_index()
self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter[100000].item(), tuple([(100000*5) + x for x in range(5)]))
self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
# test 'trouble' records that have caused crashes in the past
self.assert_equality(adapter[290000].item(), tuple([(290000*5) + x for x in range(5)]))
self.assert_equality(adapter[818000].item(), tuple([(818000*5) + x for x in range(5)]))
# test implicitly creating disk index on the fly
# JNB: not implemented yet
'''adapter = textadapter.text_adapter(dataz, compression='gzip', delimiter=',', field_names=False, infer_types=False, indexing=True, index_filename='test.idx')
adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
adapter.to_array()
self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter[100000].item(), tuple([(100000*5) + x for x in range(5)]))
self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
# test 'trouble' records that have caused crashes in the past
self.assert_equality(adapter[290000].item(), tuple([(290000*5) + x for x in range(5)]))
self.assert_equality(adapter[818000].item(), tuple([(818000*5) + x for x in range(5)]))
# test loading disk index
adapter2 = textadapter.text_adapter(dataz, compression='gzip', delimiter=',', field_names=False, infer_types=False, indexing=True, index_filename='test.idx')
adapter2.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
self.assert_equality(adapter2[0].item(), tuple([(0*5) + x for x in range(5)]))
self.assert_equality(adapter2[10].item(), tuple([(10*5) + x for x in range(5)]))
self.assert_equality(adapter2[100].item(), tuple([(100*5) + x for x in range(5)]))
self.assert_equality(adapter2[1000].item(), tuple([(1000*5) + x for x in range(5)]))
self.assert_equality(adapter2[10000].item(), tuple([(10000*5) + x for x in range(5)]))
self.assert_equality(adapter2[100000].item(), tuple([(100000*5) + x for x in range(5)]))
self.assert_equality(adapter2[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
#self.assert_equality(adapter[-1].item(), tuple(expected_values))
# test 'trouble' records that have caused crashes in the past
self.assert_equality(adapter2[290000].item(), tuple([(290000*5) + x for x in range(5)]))
self.assert_equality(adapter2[818000].item(), tuple([(818000*5) + x for x in range(5)]))
os.remove('test.idx')'''
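# For gzip input the index presumably also has to checkpoint decompressor
# state so random access can resume mid-stream; the 290000/818000 probes
# above are regression checks for records that previously crashed that path.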
def test_header_footer(self):
data = StringIO('0,1,2,3,4\n5,6,7,8,9\n10,11,12,13,14')
adapter = textadapter.text_adapter(data, header=1, field_names=False)
adapter.field_types = dict(zip(range(5), ['u4']*5))
assert_array_equal(adapter[:], np.array([(5,6,7,8,9), (10,11,12,13,14)],
dtype='u4,u4,u4,u4,u4'))
data.seek(0)
adapter = textadapter.text_adapter(data, header=2, field_names=False)
adapter.field_types = dict(zip(range(5), ['u4']*5))
assert_array_equal(adapter[:], np.array([(10,11,12,13,14)],
dtype='u4,u4,u4,u4,u4'))
data.seek(0)
adapter = textadapter.text_adapter(data, header=1, field_names=True)
adapter.field_types = dict(zip(range(5), ['u4']*5))
assert_array_equal(adapter[:], np.array([(10,11,12,13,14)],
dtype=[('5','u4'),('6','u4'),('7','u4'),('8','u4'),('9','u4')]))
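# header=N skips N leading lines before anything else, including before the
# field names line when field_names=True, which is why '5,6,7,8,9' becomes
# the names row in the last case above.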
def test_delimiter(self):
data = StringIO('1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
self.assert_equality(adapter[0].item(), (1,2,3))
data = StringIO('1 2 3\n')
adapter = textadapter.text_adapter(data, field_names=False)
self.assert_equality(adapter[0].item(), (1,2,3))
data = StringIO('1\t2\t3\n')
adapter = textadapter.text_adapter(data, field_names=False)
self.assert_equality(adapter[0].item(), (1,2,3))
data = StringIO('1x2x3\n')
adapter = textadapter.text_adapter(data, field_names=False)
self.assert_equality(adapter[0].item(), (1,2,3))
# Test no delimiter in single field csv data
data = StringIO('aaa\nbbb\nccc')
array = textadapter.text_adapter(data, field_names=False, delimiter=None)[:]
assert_array_equal(array, np.array([('aaa',), ('bbb',), ('ccc',)], dtype=[('f0', 'O')]))
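# With no delimiter argument the adapter appears to sniff the separator from
# the data (',', space, tab, and even 'x' all resolve above), while
# delimiter=None forces single-field records with no splitting at all.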
def test_auto_type_inference(self):
data = StringIO('0,1,2,3,4\n5.5,6,7,8,9\n10,11,12,13,14a\n15,16,xxx,18,19')
adapter = textadapter.text_adapter(data, field_names=False, infer_types=True)
array = adapter.to_array()
self.assert_equality(array.dtype.fields['f0'][0], np.dtype('float64'))
self.assert_equality(array.dtype.fields['f1'][0], np.dtype('uint64'))
self.assert_equality(array.dtype.fields['f2'][0], np.dtype('O'))
self.assert_equality(array.dtype.fields['f3'][0], np.dtype('uint64'))
self.assert_equality(array.dtype.fields['f4'][0], np.dtype('O'))
data = StringIO('0,1,2,3,4\n5.5,6,7,8,9\n10,11,12,13,14a\n15,16,xxx,18,19')
adapter = textadapter.text_adapter(data, field_names=False, infer_types=True)
self.assert_equality(adapter[0].dtype.fields['f0'][0], np.dtype('uint64'))
self.assert_equality(adapter[1:3].dtype.fields['f0'][0], np.dtype('float64'))
self.assert_equality(adapter[3].dtype.fields['f4'][0], np.dtype('uint64'))
self.assert_equality(adapter[:].dtype.fields['f3'][0], np.dtype('uint64'))
self.assert_equality(adapter[-1].dtype.fields['f2'][0], np.dtype('O'))
self.assert_equality(adapter[2].dtype.fields['f4'][0], np.dtype('O'))
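# Type inference is per-read: each access re-infers the narrowest dtype for
# the records actually returned, so adapter[0] yields uint64, adapter[1:3]
# widens to float64, and rows containing 'xxx' or '14a' fall back to object.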
def test_64bit_ints(self):
data = StringIO(str((2**63)-1) + ',' + str(((2**63)-1)*-1) + ',' + str((2**64)-1))
adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False)
adapter.set_field_types({0:'i8', 1:'i8', 2:'u8'})
array = adapter.to_array()
self.assert_equality(array[0].item(), ((2**63)-1, ((2**63)-1)*-1, (2**64)-1))
def test_adapter_factory(self):
data = StringIO("1,2,3")
adapter = textadapter.text_adapter(data, "csv", delimiter=',', field_names=False, infer_types=False)
self.assertTrue(isinstance(adapter, textadapter.CSVTextAdapter))
self.assertRaises(textadapter.AdapterException, textadapter.text_adapter, data, "foobar")
def test_field_names(self):
# Test for ignoring of extra fields
data = StringIO('f0,f1\n0,1,2\n3,4,5')
adapter = textadapter.text_adapter(data, 'csv', delimiter=',', field_names=True)
array = adapter.to_array()
self.assert_equality(array.dtype.names, ('f0', 'f1'))
self.assert_equality(array[0].item(), (0,1))
self.assert_equality(array[1].item(), (3,4))
# Test for duplicate field names
data = StringIO('f0,field,field\n0,1,2\n3,4,5')
adapter = textadapter.text_adapter(data, 'csv', delimiter=',', field_names=True, infer_types=False)
adapter.set_field_types({0:'u4', 1:'u4', 2:'u4'})
array = adapter.to_array()
self.assert_equality(array.dtype.names, ('f0', 'field', 'field1'))
# Test for field names list
data = StringIO('0,1,2\n3,4,5')
adapter = textadapter.text_adapter(data, field_names=['a', 'b', 'c'], infer_types=False)
adapter.field_types = {0:'u4', 1:'u4', 2:'u4'}
array = adapter[:]
self.assertTrue(array.dtype.names == ('a', 'b', 'c'))
assert_array_equal(array, np.array([(0,1,2), (3,4,5)], dtype=[('a', 'u4'), ('b', 'u4'), ('c', 'u4')]))
def test_float_conversion(self):
data = StringIO('10,1.333,-1.23,10.0E+2,999.9e-2')
adapter = textadapter.text_adapter(data, field_names=False, infer_types=False)
adapter.set_field_types(dict(zip(range(5), ['f8']*5)))
array = adapter[0]
#self.assert_equality(array[0].item(), (10.0,1.333,-1.23,1000.0,9.999))
self.assertAlmostEqual(array[0][0], 10.0)
self.assertAlmostEqual(array[0][1], 1.333)
self.assertAlmostEqual(array[0][2], -1.23)
self.assertAlmostEqual(array[0][3], 1000.0)
self.assertAlmostEqual(array[0][4], 9.999)
def test_generators(self):
def int_generator(num_recs):
for i in range(num_recs):
yield ','.join([str(i*5), str(i*5+1), str(i*5+2), str(i*5+3), str(i*5+4)])
adapter = textadapter.text_adapter(int_generator(self.num_records), field_names=False)
array = adapter[:]
self.assert_equality(array.size, self.num_records)
record = [x for x in range(0, 5)]
for i in range(0, self.num_records):
self.assert_equality(array[i].item(), tuple(record))
record[0] += 5
record[1] += 5
record[2] += 5
record[3] += 5
record[4] += 5
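# Any iterable of text records works as a source, not just file-like
# objects. A minimal sketch mirroring the generator above:
#   gen = (','.join(map(str, range(i, i + 5))) for i in range(0, 25, 5))
#   textadapter.text_adapter(gen, field_names=False)[:]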
def test_comments(self):
data = StringIO('1,2,3\n#4,5,6')
adapter = textadapter.text_adapter(data, field_names=False)
array = adapter[:]
self.assert_equality(array.size, 1)
self.assert_equality(array[0].item(), (1,2,3))
data = StringIO('1,2,3\n#4,5,6')
adapter = textadapter.text_adapter(data, field_names=False, comment=None)
array = adapter[:]
self.assert_equality(array.size, 2)
self.assert_equality(array[0].item(), ('1',2,3))
self.assert_equality(array[1].item(), ('#4',5,6))
def test_escapechar(self):
data = StringIO('1,2\\2,3\n4,5\\5\\5,6')
array = textadapter.text_adapter(data, field_names=False)[:]
assert_array_equal(array,
np.array([(1,22,3), (4,555,6)], dtype='u8,u8,u8'))
data = StringIO('\\1,2,3\n4,5,6\\')
array = textadapter.text_adapter(data, field_names=False)[:]
assert_array_equal(array,
np.array([(1,2,3), (4,5,6)], dtype='u8,u8,u8'))
data = StringIO('a,b\\,b,c\na,b\\,b\\,b,c')
array = textadapter.text_adapter(data, field_names=False)[:]
assert_array_equal(array,
np.array([('a', 'b,b', 'c'), ('a', 'b,b,b', 'c')], dtype='O,O,O'))
data = StringIO('a,bx,b,c\na,bx,bx,b,c')
array = textadapter.text_adapter(data, field_names=False, escape='x')[:]
assert_array_equal(array,
np.array([('a', 'b,b', 'c'), ('a', 'b,b,b', 'c')], dtype='O,O,O'))
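# Escape semantics exercised above: backslash is the default escape, an
# escaped delimiter stays inside the field with the escape char dropped,
# and escape='x' substitutes a custom escape with the same behavior.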
'''def test_dataframe_output(self):
try:
import pandas
except ImportError:
return
# Test filling blank lines with fill values if output is dataframe
data = StringIO('1,2,3\n\n4,5,6')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.field_types = {0:'O', 1:'O', 2:'O'}
adapter.set_fill_values({0:np.nan, 1:np.nan, 2:np.nan})
df = adapter.to_dataframe()'''
def test_csv(self):
# Test skipping blank lines
data = StringIO('1,2,3\n\n4,5,6')
adapter = textadapter.text_adapter(data, field_names=False)
array = adapter[:]
assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
def test_json(self):
# Test json number
data = StringIO('{"id":123}')
adapter = textadapter.text_adapter(data, parser='json')
array = adapter[:]
assert_array_equal(array, np.array([(123,)], dtype=[('id', 'u8')]))
# Test json number
data = StringIO('{"id":"xxx"}')
adapter = textadapter.text_adapter(data, parser='json')
array = adapter[:]
assert_array_equal(array, np.array([('xxx',)], dtype=[('id', 'O')]))
# Test multiple values
data = StringIO('{"id":123, "name":"xxx"}')
adapter = textadapter.text_adapter(data, parser='json')
array = adapter[:]
assert_array_equal(array, np.array([(123, 'xxx',)], dtype=[('id', 'u8'), ('name', 'O')]))
# Test multiple records
data = StringIO('[{"id":123, "name":"xxx"}, {"id":456, "name":"yyy"}]')
adapter = textadapter.text_adapter(data, parser='json')
array = adapter[:]
assert_array_equal(array, np.array([(123, 'xxx',), (456, 'yyy')], dtype=[('id', 'u8'), ('name', 'O')]))
# Test multiple objects separated by newlines
data = StringIO('{"id":123, "name":"xxx"}\n{"id":456, "name":"yyy"}')
adapter = textadapter.text_adapter(data, parser='json')
array = adapter[:]
assert_array_equal(array, np.array([(123, 'xxx',), (456, 'yyy')], dtype=[('id', 'u8'), ('name', 'O')]))
data = StringIO('{"id":123, "name":"xxx"}\n')
adapter = textadapter.text_adapter(data, parser='json')
array = adapter[:]
assert_array_equal(array, np.array([(123, 'xxx',)], dtype=[('id', 'u8'), ('name', 'O')]))
# JNB: broken; should we really be supporting the following json inputs?
'''
# Test subarrays
data = StringIO('{"id":123, "names":["xxx","yyy","zzz"]}')
adapter = textadapter.text_adapter(data, parser='json')
array = adapter[:]
assert_array_equal(array, np.array([(123, 'xxx', 'yyy', 'zzz',)],
dtype=[('f0', 'u8'), ('f1', 'O'), ('f2', 'O'), ('f3', 'O')]))
# Test subobjects
data = StringIO('{"id":123, "names":{"a":"xxx", "b":"yyy", "c":"zzz"}}')
adapter = textadapter.text_adapter(data, parser='json')
array = adapter[:]
assert_array_equal(array, np.array([(123, 'xxx', 'yyy', 'zzz',)],
dtype=[('f0', 'u8'), ('f1', 'O'), ('f2', 'O'), ('f3', 'O')]))
'''
# Test ranges
data = StringIO('{"id": 1, "name": "www"}\n'
'{"id": 2, "name": "xxx"}\n'
'{"id": 3, "name": "yyy"}\n'
'{"id": 4, "name": "zzz"}')
adapter = textadapter.text_adapter(data, parser='json')
array = adapter[2:4]
assert_array_equal(array, np.array([(3, 'yyy'), (4, 'zzz')],
dtype=[('id', 'u8'), ('name', 'O')]))
# Test column order
data = StringIO('{"xxx": 1, "aaa": 2}\n')
adapter = textadapter.text_adapter(data, parser='json')
array = adapter[:]
assert_array_equal(array, np.array([(1, 2)],
dtype=[('xxx', 'u8'), ('aaa', 'u8')]))
# Test field filter
data = StringIO('{"id": 1, "name": "www"}\n'
'{"id": 2, "name": "xxx"}\n'
'{"id": 3, "name": "yyy"}\n'
'{"id": 4, "name": "zzz"}')
adapter = textadapter.text_adapter(data, parser='json')
adapter.field_filter = ['name']
array = adapter[:]
assert_array_equal(array, np.array([('www',), ('xxx',), ('yyy',), ('zzz',)],
dtype=[('name', 'O')]))
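# field_filter restricts output to the named json keys; combined with the
# range read above, the json parser accepts a single object, a top-level
# array, or newline-delimited objects as input framings.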
def test_stepping(self):
data = StringIO('0,1\n2,3\n4,5\n6,7\n8,9\n10,11\n12,13\n14,15\n16,17\n18,19')
adapter = textadapter.text_adapter(data, field_names=False)
assert_array_equal(adapter[::2], np.array([(0,1), (4,5), (8,9), (12,13), (16,17)], dtype='u8,u8'))
assert_array_equal(adapter[::3], np.array([(0,1), (6,7), (12,13), (18,19)], dtype='u8,u8'))
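# Slice steps are honored (every 2nd/3rd record above); whether skipped
# records are parsed and discarded or seeked past is not asserted here.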
def test_num_records(self):
data = StringIO('0,1\n2,3\n4,5\n6,7\n8,9\n10,11\n12,13\n14,15\n16,17\n18,19')
adapter = textadapter.text_adapter(data, field_names=False, num_records=2)
assert_array_equal(adapter[:], np.array([(0, 1), (2, 3)], dtype='u8,u8'))
def run(verbosity=1, num_records=100000):
if num_records < 10:
raise ValueError('number of records for generated datasets must be at least 10')
TestTextAdapter.num_records = num_records
suite = unittest.TestLoader().loadTestsFromTestCase(TestTextAdapter)
return unittest.TextTestRunner(verbosity=verbosity).run(suite)
if __name__ == '__main__':
run()
| 45.229682
| 167
| 0.572865
| 5,412
| 38,400
| 3.947709
| 0.06966
| 0.073391
| 0.093517
| 0.083969
| 0.801498
| 0.782588
| 0.74037
| 0.71561
| 0.690475
| 0.675029
| 0
| 0.066767
| 0.239036
| 38,400
| 848
| 168
| 45.283019
| 0.664385
| 0.054271
| 0
| 0.564189
| 1
| 0.016892
| 0.087573
| 0.014969
| 0
| 0
| 0
| 0.001179
| 0.244932
| 1
| 0.054054
| false
| 0.003378
| 0.016892
| 0.003378
| 0.086149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
02241afd4e395cfa694093e2c540b15b145886cb
| 6,634
|
py
|
Python
|
tests/test_polyloxpgen.py
|
hoefer-lab/polyloxpgen
|
4edd2c9f5197da5963c7b863c415ade57e9df77b
|
[
"MIT"
] | null | null | null |
tests/test_polyloxpgen.py
|
hoefer-lab/polyloxpgen
|
4edd2c9f5197da5963c7b863c415ade57e9df77b
|
[
"MIT"
] | null | null | null |
tests/test_polyloxpgen.py
|
hoefer-lab/polyloxpgen
|
4edd2c9f5197da5963c7b863c415ade57e9df77b
|
[
"MIT"
] | null | null | null |
### this requires pip install pytest
### if installed, run >>> pytest within the /tests or /polyloxpgen (package) directory
import polyloxpgen.merge
import polyloxpgen.pgen
import numpy as np
import pandas as pd
import os
import time
def test_single_sample_output_bc1020_ld2017():
floc = os.path.join(os.path.dirname(__file__), '')
ref_file = os.path.join(floc, 'original', 'bc1020_BarPipe.txt')
purged_barcodes_ref = np.loadtxt(ref_file, usecols=0, skiprows=1, delimiter='\t', dtype=str)
purged_reads_ref = np.loadtxt(ref_file, usecols=1, skiprows=1, delimiter='\t')
data_minrecs_ref = np.loadtxt(ref_file, usecols=3, skiprows=1, delimiter='\t', dtype=int)
data_pgen_ref = np.loadtxt(ref_file, usecols=4, skiprows=1, delimiter='\t')
# run polylox_merge
df_merged = polyloxpgen.merge.polylox_merge([os.path.join(floc, 'original', 'bc1020.barcode.count.txt')],
['bc1020'], os.path.join(floc, 'temp'), 'bc1020_merged')
# run polylox_pgen
df_pgen = polyloxpgen.pgen.polylox_pgen(os.path.join(floc, 'temp', 'bc1020_merged.txt'),
os.path.join(floc, 'temp'),
'bc1020_pgen',
path_matrix_type='ld_2017')
# read back from the resulting file (corresponds to df_pgen)
df_res = pd.read_csv(os.path.join(floc, 'temp', 'bc1020_pgen.txt'), sep='\t', index_col=0)
# delete temporary files at the end
time.sleep(1.0)
if os.path.isfile(os.path.join(floc, 'temp', 'bc1020_merged.txt')):
os.remove(os.path.join(floc, 'temp', 'bc1020_merged.txt'))
if os.path.isfile(os.path.join(floc, 'temp', 'bc1020_pgen.txt')):
os.remove(os.path.join(floc, 'temp', 'bc1020_pgen.txt'))
# check if the same set of barcodes comes out
assert set(purged_barcodes_ref)==set(df_res.index.to_numpy(dtype=str))
# then reindex to get the barcode order the same
df_res = df_res.reindex(purged_barcodes_ref)
# all remaining checks run on the reordered df_res
assert np.all(purged_barcodes_ref==df_res.index.to_numpy(dtype=str))
assert np.all(purged_reads_ref==df_res.bc1020)
assert np.all(data_minrecs_ref==df_res.MinRec)
assert np.allclose(data_pgen_ref, df_res.Pgen)
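# comparison pattern shared by the tests in this file: first assert set
# equality of the barcode index (order-insensitive), then reindex df_res to
# the reference order so the element-wise np.all / np.allclose checks hold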
def test_single_sample_output_bc1022_ld2017():
floc = os.path.join(os.path.dirname(__file__), '')
ref_file = os.path.join(floc, 'original', 'bc1022_BarPipe.txt')
purged_barcodes_ref = np.loadtxt(ref_file, usecols=0, skiprows=1, delimiter='\t', dtype=str)
purged_reads_ref = np.loadtxt(ref_file, usecols=1, skiprows=1, delimiter='\t')
data_minrecs_ref = np.loadtxt(ref_file, usecols=3, skiprows=1, delimiter='\t', dtype=int)
data_pgen_ref = np.loadtxt(ref_file, usecols=4, skiprows=1, delimiter='\t')
# run polylox_merge
df_merged = polyloxpgen.merge.polylox_merge([os.path.join(floc, 'original', 'bc1022.barcode.count.txt')],
['bc1022'], os.path.join(floc, 'temp'), 'bc1022_merged')
# run polylox_pgen
df_pgen = polyloxpgen.pgen.polylox_pgen(os.path.join(floc, 'temp', 'bc1022_merged.txt'),
os.path.join(floc, 'temp'),
'bc1022_pgen',
path_matrix_type='ld_2017')
# read back from the resulting file (corresponds to df_pgen)
df_res = pd.read_csv(os.path.join(floc, 'temp', 'bc1022_pgen.txt'), sep='\t', index_col=0)
# delete temporary files at the end
time.sleep(1.0)
if os.path.isfile(os.path.join(floc, 'temp', 'bc1022_merged.txt')):
os.remove(os.path.join(floc, 'temp', 'bc1022_merged.txt'))
if os.path.isfile(os.path.join(floc, 'temp', 'bc1022_pgen.txt')):
os.remove(os.path.join(floc, 'temp', 'bc1022_pgen.txt'))
# check if the same set of barcodes comes out
assert set(purged_barcodes_ref)==set(df_res.index.to_numpy(dtype=str))
# then reindex to get the barcode order the same
df_res = df_res.reindex(purged_barcodes_ref)
# all remaining checks run on the reordered df_res
assert np.all(purged_barcodes_ref==df_res.index.to_numpy(dtype=str))
assert np.all(purged_reads_ref==df_res.bc1022)
assert np.all(data_minrecs_ref==df_res.MinRec)
assert np.allclose(data_pgen_ref, df_res.Pgen)
def test_two_sample_output_bc1020_bc1022_uniform():
floc = os.path.join(os.path.dirname(__file__), '')
# load reference dataframes
df_merged_ref = pd.read_csv(os.path.join(floc, 'original', 'bc1020_bc1022_merged.txt'), sep='\t', index_col=0)
df_pgen_ref = pd.read_csv(os.path.join(floc, 'original', 'bc1020_bc1022_pgen_uniform.txt'), sep='\t', index_col=0)
# run polylox_merge
df_merged = polyloxpgen.merge.polylox_merge([os.path.join(floc, 'original', 'bc1020.barcode.count.txt'),
os.path.join(floc, 'original', 'bc1022.barcode.count.txt')],
['bc1020', 'bc1022'], os.path.join(floc, 'temp'), 'bc1020_bc1022_merged')
# run polylox_pgen
df_pgen = polyloxpgen.pgen.polylox_pgen(os.path.join(floc, 'temp', 'bc1020_bc1022_merged.txt'),
os.path.join(floc, 'temp'), 'bc1020_bc1022_pgen',
path_matrix_type='uniform')
# read back from the resulting files (correspond to df_merged and df_pgen)
df_merged_res = pd.read_csv(os.path.join(floc, 'temp', 'bc1020_bc1022_merged.txt'), sep='\t', index_col=0)
df_pgen_res = pd.read_csv(os.path.join(floc, 'temp', 'bc1020_bc1022_pgen.txt'), sep='\t', index_col=0)
# delete temporary files at the end
time.sleep(1.0)
if os.path.isfile(os.path.join(floc, 'temp', 'bc1020_bc1022_merged.txt')):
os.remove(os.path.join(floc, 'temp', 'bc1020_bc1022_merged.txt'))
if os.path.isfile(os.path.join(floc, 'temp', 'bc1020_bc1022_pgen.txt')):
os.remove(os.path.join(floc, 'temp', 'bc1020_bc1022_pgen.txt'))
# compare merge dataframes
assert np.all(df_merged_ref.index.to_numpy(dtype=str)==df_merged_res.index.to_numpy(dtype=str))
assert np.all(df_merged_ref.bc1020==df_merged_res.bc1020)
assert np.all(df_merged_ref.bc1022==df_merged_res.bc1022)
# compare pgen dataframes
assert np.all(df_pgen_ref.index.to_numpy(dtype=str)==df_pgen_res.index.to_numpy(dtype=str))
assert np.all(df_pgen_ref.bc1020==df_pgen_res.bc1020)
assert np.all(df_pgen_ref.bc1022==df_pgen_res.bc1022)
assert np.all(df_pgen_ref.MinRec==df_pgen_res.MinRec)
assert np.allclose(df_pgen_ref.Pgen, df_pgen_res.Pgen)
| 49.879699
| 118
| 0.666265
| 989
| 6,634
| 4.239636
| 0.115268
| 0.064393
| 0.085857
| 0.110184
| 0.88457
| 0.855235
| 0.814214
| 0.790842
| 0.76103
| 0.735989
| 0
| 0.054671
| 0.194905
| 6,634
| 132
| 119
| 50.257576
| 0.730388
| 0.126771
| 0
| 0.345679
| 0
| 0
| 0.149722
| 0.054129
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.037037
| false
| 0
| 0.074074
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
02324eb2023ae43c54f682a4a2f604864324c111
| 81,093
|
py
|
Python
|
src/sniffmypacketsv2/transforms/common/protocols/ssh.py
|
SneakersInc/sniffmypacketsv2
|
55d8ff70eedb4dd948351425c25a1e904ea6d50e
|
[
"Apache-2.0"
] | 11
|
2015-01-01T19:44:04.000Z
|
2020-03-26T07:30:26.000Z
|
src/sniffmypacketsv2/transforms/common/protocols/ssh.py
|
SneakersInc/sniffmypacketsv2
|
55d8ff70eedb4dd948351425c25a1e904ea6d50e
|
[
"Apache-2.0"
] | 8
|
2015-01-01T22:45:59.000Z
|
2015-12-12T10:37:50.000Z
|
src/sniffmypacketsv2/transforms/common/protocols/ssh.py
|
SneakersInc/sniffmypacketsv2
|
55d8ff70eedb4dd948351425c25a1e904ea6d50e
|
[
"Apache-2.0"
] | 3
|
2017-06-04T05:18:24.000Z
|
2020-03-26T07:30:27.000Z
|
import binascii
import base64
from scapy.layers.inet import *
import dissector
preprocess_sessions = []
sessions = []
def is_created_stream_session(Src, Dst, SPort, DPort):
"""
this method is used for the purpose of tcp stream reassembly,
for checking whether this is a new session or not.
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
"""
i = 0
while i < len(preprocess_sessions):
if Src == preprocess_sessions[i][0] and\
Dst == preprocess_sessions[i][1] and\
SPort == preprocess_sessions[i][2] and\
DPort == preprocess_sessions[i][3]:
return True
i = i + 1
return False
def create_stream_session(Src, Dst, SPort, DPort, stream):
"""
this method is used for the purpose of tcp stream reassembly,
for creating a new session.
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
@param stream: the initial packet
"""
if stream.push:
sessions.append([Src, Dst, SPort, DPort, stream])
else:
preprocess_sessions.append([Src, Dst, SPort, DPort, stream])
def build_stream(Src, Dst, SPort, DPort, stream):
"""
this method is used for the purpose of tcp stream reassembly,
for appending a new packet.
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
@param stream: the current packet
"""
i = 0
while i < len(preprocess_sessions):
if Src == preprocess_sessions[i][0] and\
Dst == preprocess_sessions[i][1] and\
SPort == preprocess_sessions[i][2] and\
DPort == preprocess_sessions[i][3]:
if not stream.push:
preprocess_sessions[i][4] =\
preprocess_sessions[i][4].append_data(\
Src, Dst, SPort, DPort, stream)
else:
sessions.append(\
[Src, Dst, SPort, DPort, preprocess_sessions[i][4].append_data(\
Src, Dst, SPort, DPort, stream)])
del preprocess_sessions[i]
break
i = i + 1
def get_stream(Src, Dst, SPort, DPort, obj):
"""
this method is used for the purpose of tcp stream reassembly,
for retrieving a stream or packet.
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
@param obj: last packet to be appended
"""
i = 0
while i < len(sessions):
if Src == sessions[i][0] and Dst == sessions[i][1] and\
SPort == sessions[i][2] and DPort == sessions[i][3]:
if sessions[i][4].seq == obj.seq:
return sessions[i][4].pkt
i = i + 1
return -1
def is_stream_end(Src, Dst, SPort, DPort, obj):
"""
this method is used for the purpose of tcp stream reassembly,
for checking whether this is the last packet in the stream or not.
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
@param obj: last packet in stream.
"""
i = 0
while i < len(sessions):
if Src == sessions[i][0] and Dst == sessions[i][1] and\
SPort == sessions[i][2] and DPort == sessions[i][3]:
if sessions[i][4].seq == obj.seq:
return True
i = i + 1
return False
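# reassembly flow sketch: segments accumulate in preprocess_sessions until a
# PSH-flagged segment arrives, at which point the completed stream moves to
# sessions; get_stream()/is_stream_end() then match on the final sequence
# number to hand the whole payload back to the dissector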
class Stream:
"""
this class is for tcp stream reassembly
"""
pkt = ""
seq = -1
push = None
length_of_last_packet = -1
stream = False
def __init__(self, pkt, push, seq):
"""
this constructor is used for the purpose of tcp stream reassembly,
for initializing tcp packets.
@param pkt: packet payload
@param push: specify if push flag is true or false
@param seq: sequence number
"""
self.stream = False
self.pkt = pkt
self.push = push
self.seq = seq
self.length_of_last_packet = len(pkt)
def append_data(self, Src, Dst, SPort, DPort, obj):
"""
this method is used for the purpose of tcp stream reassembly,
for appending a packet to an existing stream.
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
@param obj: last packet in stream.
"""
if self.seq + self.length_of_last_packet == obj.seq and obj.push:
self.stream = True
self.append_packet(obj.pkt)
self.change_seq(obj.seq)
self.push = obj.push
self.length_of_last_packet = len(obj.pkt)
elif self.seq + self.length_of_last_packet == obj.seq:
self.append_packet(obj.pkt)
self.change_seq(obj.seq)
self.push = obj.push
return self
def append_packet(self, pkt):
"""
this method is used for the purpose of tcp stream reassembly,
for appending a packet payload to an existing stream.
@param pkt: packet payload.
"""
self.pkt = self.pkt + pkt
def change_seq(self, seq):
"""
this method is used for the purpose of tcp stream reassembly,
for updating the last packet sequence in the stream.
@param seq: sequence number.
"""
self.seq = seq
def int2bin(n, count=16):
"""
this method converts integer numbers to binary numbers
@param n: the number to be converted
@param count: the number of binary digits
"""
return "".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
# holds ssh encrypted sessions
encryptedsessions = []
def is_created_session(Src, Dst, SPort, DPort):
"""
method returns true if the ssh session exists
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
"""
i = 0
while i < len(encryptedsessions):
# membership must be tested per value; 'a and b and c and d in lst' only tests d
if Src in encryptedsessions[i] and Dst in encryptedsessions[i] and SPort in encryptedsessions[i] and DPort in encryptedsessions[i]:
return True
i = i + 1
return False
def create_session(Src, Dst, SPort, DPort, Macl):
"""
method for creating encrypted ssh sessions
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
"""
if not is_created_session(Src, Dst, SPort, DPort):
encryptedsessions.append([Src, Dst, SPort, DPort, Macl, False])
def set_as_encrypted(Src, Dst, SPort, DPort):
"""
set the ssh session as encrypted
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
"""
i = 0
while i < len(encryptedsessions):
if Src in encryptedsessions[i] and Dst in encryptedsessions[i] and SPort in encryptedsessions[i] and DPort in encryptedsessions[i]:
encryptedsessions[i] = [Src, Dst, SPort, DPort,\
encryptedsessions[i][4], True]
i = i + 1
return -1
def is_encrypted_session(Src, Dst, SPort, DPort):
"""
returns true if the ssh session is encrypted
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
"""
i = 0
while i < len(encryptedsessions):
if Src in encryptedsessions[i] and Dst in encryptedsessions[i] and SPort in encryptedsessions[i] and DPort in encryptedsessions[i] and encryptedsessions[i][5]:
return True
i = i + 1
return False
def get_mac_length(Src, Dst, SPort, DPort):
"""
method for retrieving the length of the mac for a specific ssh session
@param Src: source ip address
@param Dst: destination ip address
@param SPort: source port number
@param DPort: destination port number
"""
i = 0
while i < len(encryptedsessions):
if Src in encryptedsessions[i] and Dst in encryptedsessions[i] and SPort in encryptedsessions[i] and DPort in encryptedsessions[i]:
return encryptedsessions[i][4]
i = i + 1
return -1
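# session bookkeeping sketch: each encryptedsessions entry is the list
# [Src, Dst, SPort, DPort, mac_length, encrypted_flag]; set_as_encrypted()
# flips the flag once SSH_MSG_NEWKEYS is seen, and get_mac_length() is used
# later to split the ciphertext from the trailing MAC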
class SSHField(XByteField):
"""
this is a field class for handling the ssh packets
@attention: this class inherits XByteField
"""
found = False
encryptionstarted = False
macstarted = False
maclength = 0
holds_packets = 1
name = "SSHField"
myresult = ""
def get_ascii(self, hexstr):
"""
takes a hex string and returns ascii chars
@param hexstr: hex value in str format
"""
return binascii.unhexlify(hexstr)
def __init__(self, name, default):
"""
class constructor, for initializing instance variables
@param name: name of the field
@param default: Scapy has many formats to represent the data
internal, human and machine. anyway, you may set this param to None.
"""
self.name = name
self.fmt = "!B"
Field.__init__(self, name, default, "!B")
def get_discnct_msg(self, cn):
"""
method returns a message for a specific code number
@param cn: code number
"""
codes = {
1: "SSH_DISCONNECT_HOST_NOT_ALLOWED_TO_CONNECT",
2: "SSH_DISCONNECT_PROTOCOL_ERROR",
3: "SSH_DISCONNECT_KEY_EXCHANGE_FAILED",
4: "SSH_DISCONNECT_RESERVED",
5: "SSH_DISCONNECT_MAC_ERROR",
6: "SSH_DISCONNECT_COMPRESSION_ERROR",
7: "SSH_DISCONNECT_SERVICE_NOT_AVAILABLE",
8: "SSH_DISCONNECT_PROTOCOL_VERSION_NOT_SUPPORTED",
9: "SSH_DISCONNECT_HOST_KEY_NOT_VERIFIABLE",
10: "SSH_DISCONNECT_CONNECTION_LOST",
11: "SSH_DISCONNECT_BY_APPLICATION",
12: "SSH_DISCONNECT_TOO_MANY_CONNECTIONS",
13: "SSH_DISCONNECT_AUTH_CANCELLED_BY_USER",
14: "SSH_DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE",
15: "SSH_DISCONNECT_ILLEGAL_USER_NAME",
}
if cn in codes:
return codes[cn] + " "
return "UnknownCode[" + str(cn) + "] "
def get_code_msg(self, cn):
"""
method returns a message for a specific code number
@param cn: code number
"""
codes = {
1: "SSH_MSG_DISCONNECT",
2: "SSH_MSG_IGNORE",
3: "SSH_MSG_UNIMPLEMENTED",
4: "SSH_MSG_DEBUG",
5: "SSH_MSG_SERVICE_REQUEST",
6: "SSH_MSG_SERVICE_ACCEPT",
20: "SSH_MSG_KEXINIT",
21: "SSH_MSG_NEWKEYS",
30: "SSH_MSG_KEXDH_INIT",
31: "SSH_MSG_KEXDH_REPLY",
32: "SSH_MSG_KEX_DH_GEX_INIT",
33: "SSH_MSG_KEX_DH_GEX_REPLY",
34: "SSH_MSG_KEX_DH_GEX_REQUEST",
50: "SSH_MSG_USERAUTH_REQUEST",
51: "SSH_MSG_USERAUTH_FAILURE",
52: "SSH_MSG_USERAUTH_SUCCESS",
53: "SSH_MSG_USERAUTH_BANNER",
60: "SSH_MSG_USERAUTH_PK_OK",
80: "SSH_MSG_GLOBAL_REQUEST",
81: "SSH_MSG_REQUEST_SUCCESS",
82: "SSH_MSG_REQUEST_FAILURE",
90: "SSH_MSG_CHANNEL_OPEN",
91: "SSH_MSG_CHANNEL_OPEN_CONFIRMATION",
92: "SSH_MSG_CHANNEL_OPEN_FAILURE",
93: "SSH_MSG_CHANNEL_WINDOW_ADJUST",
94: "SSH_MSG_CHANNEL_DATA",
95: "SSH_MSG_CHANNEL_EXTENDED_DATA",
96: "SSH_MSG_CHANNEL_EOF",
97: "SSH_MSG_CHANNEL_CLOSE",
98: "SSH_MSG_CHANNEL_REQUEST",
99: "SSH_MSG_CHANNEL_SUCCESS",
100: "SSH_MSG_CHANNEL_FAILURE"}
if cn in codes:
return codes[cn] + " "
return "UnknownCode[" + str(cn) + "] "
def getfield(self, pkt, s):
"""
this method gets the packet, takes what needs to be
taken and lets the rest go, so it returns two values.
the first value belongs to this field and the second is
the remainder, which still needs to be dissected by
other "field classes".
@param pkt: holds the whole packet
@param s: holds only the remaining data which is not dissected yet.
"""
ss = -1
flags = ''  # start from an empty string so the 'P' concatenation below is safe
seq = pkt.underlayer.fields["seq"]
push = False
flags_bits = list(int2bin(pkt.underlayer.fields["flags"]))
if flags_bits[11] == '1':
flags = 'A'
if flags_bits[12] == '1':
flags = flags + 'P'
push = 'P' in flags
if not is_created_stream_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"], pkt.underlayer.fields["dport"]):
seqn = pkt.underlayer.fields["seq"]
stream = Stream(s, push, seqn)
create_stream_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"], stream)
elif is_created_stream_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]):
seqn = pkt.underlayer.fields["seq"]
stream = Stream(s, push, seqn)
build_stream(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"], stream)
if not dissector.Dissector.preprocess_done:
return "", ""
if len(sessions) > 0:
if is_stream_end(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"], stream):
ss = get_stream(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"], stream)
if not ss == -1:
s = ss
else:
return "", ""
self.myresult = ""
resultlist = []
if s.upper().startswith("SSH"):
return "", s
for c in s:
ustruct = struct.unpack(self.fmt, c)
byte = str(hex(ustruct[0]))[2:]
if len(byte) == 1:
byte = "0" + byte
self.myresult = self.myresult + byte
if not s.startswith("SSH") and len(self.myresult) > 12:
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]):
pakl = str(int(self.myresult[:8], 16))
padl = str(int(self.myresult[8:10], 16))
payloadl = int(pakl) - int(padl) - 1
opcode = self.get_code_msg(int(self.myresult[10:12], 16))
payload = self.myresult[12:12 + payloadl * 2]
padding = self.myresult[12 + payloadl * 2:12 + payloadl * 2\
+ int(padl) * 2]
resultlist.append(("packet_length", pakl))
resultlist.append(("padding_length", padl))
resultlist.append(("opcode", opcode))
if is_encrypted_session(pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"]):
if is_created_session(pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"]):
encrypted_payload = base64.standard_b64encode(\
self.get_ascii(self.myresult[:\
get_mac_length(pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"]) * 2]))
else:
encrypted_payload = base64.standard_b64encode(\
self.myresult[:])
resultlist.append(("encrypted_payload", encrypted_payload))
if is_created_session(pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"]):
mac = base64.standard_b64encode(\
self.get_ascii(self.myresult[\
get_mac_length(pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]) * 2:]))
resultlist.append(("mac", mac))
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]) and\
opcode.startswith("SSH_MSG_KEXDH_INIT"):
try:
e_length = int(self.myresult[12:20], 16)
e = base64.standard_b64encode(\
self.get_ascii(self.myresult[20:20 + e_length * 2]))
resultlist.append(("e_length", str(e_length)))
resultlist.append(("e", e))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]) and\
opcode.startswith("SSH_MSG_KEXDH_REPLY"):
try:
server_public_host_key_and_certificates_K_S_length =\
int(self.myresult[12:20], 16)
server_public_host_key_and_certificates_K_S =\
self.myresult[20:20 +\
server_public_host_key_and_certificates_K_S_length * 2]
f_length = int(self.myresult[20 + \
server_public_host_key_and_certificates_K_S_length\
* 2:20 + server_public_host_key_and_certificates_K_S_length\
* 2 + 8], 16)
f = base64.standard_b64encode(\
self.get_ascii(self.myresult[20 +\
server_public_host_key_and_certificates_K_S_length\
* 2 + 8:20 + server_public_host_key_and_certificates_K_S_length\
* 2 + 8 + f_length * 2]))
signature_of_h_length = int(self.myresult[20 +\
server_public_host_key_and_certificates_K_S_length\
* 2 + 8 + f_length * 2:20 +\
server_public_host_key_and_certificates_K_S_length\
* 2 + 8 + f_length * 2 + 8], 16)
signature_of_h = self.myresult[20 +\
server_public_host_key_and_certificates_K_S_length\
* 2 + 8 + f_length * 2 + 8:20 +\
server_public_host_key_and_certificates_K_S_length\
* 2 + 8 + f_length * 2 + 8 +\
signature_of_h_length * 2]
resultlist.append(\
("server_public_host_key_and_certificates_K_S_length",\
str(server_public_host_key_and_certificates_K_S_length)))
resultlist.append(\
("server_public_host_key_and_certificates_K_S",\
base64.standard_b64encode(\
self.get_ascii(server_public_host_key_and_certificates_K_S))))
resultlist.append(("f_length", str(f_length)))
resultlist.append(("f", f))
resultlist.append(("signature_of_h_length",
str(signature_of_h_length)))
resultlist.append(("signature_of_h",
base64.standard_b64encode(\
self.get_ascii(signature_of_h))))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_SERVICE_REQUEST"):
try:
service_name_length = int(self.myresult[12:20], 16)
service_name = self.myresult[20:20 \
+ service_name_length * 2]
resultlist.append(("service_name_length",
str(service_name_length)))
resultlist.append(("service_name",
base64.standard_b64encode(self.get_ascii(service_name))))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_SERVICE_ACCEPT"):
try:
service_name_length = int(self.myresult[12:20], 16)
service_name = self.myresult[20:20 +\
service_name_length * 2]
resultlist.append(("service_name_length",
str(service_name_length)))
resultlist.append(("service_name",
self.get_ascii(service_name)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_NEWKEYS"):
try:
set_as_encrypted(pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"])
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_DISCONNECT"):
try:
reason_code = self.get_discnct_msg(int(\
self.myresult[12:20], 16)) * 2
description_length = int(\
self.myresult[20:28], 16)
description = self.myresult[28:28 +\
description_length * 2]
language_tag_length = int(\
self.myresult[28 + description_length * 2:28 +\
description_length * 2 + 8], 16)
language_tag = self.myresult[28 + description_length\
* 2 + 8:28 + description_length * 2 + 8 +\
language_tag_length * 2]
resultlist.append(("reason_code", reason_code))
resultlist.append(("description_length",
str(description_length)))
resultlist.append(("description",
self.get_ascii(description)))
resultlist.append(("language_tag_length",
str(language_tag_length)))
resultlist.append(("language_tag",
self.get_ascii(language_tag)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_IGNORE"):
try:
data_length = int(self.myresult[12:20], 16)
data = self.myresult[20:20 + data_length * 2]
resultlist.append(("data_length", str(data_length)))
resultlist.append(\
("data", base64.standard_b64encode(self.get_ascii(data))))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_USERAUTH_PK_OK"):
try:
public_key_algorithm_name_from_the_request_length =\
int(self.myresult[12:20], 16)
public_key_algorithm_name_from_the_request =\
self.myresult[20:20 +\
public_key_algorithm_name_from_the_request_length * 2]
public_key_blob_from_the_request_length = int(\
self.myresult[20 + \
public_key_algorithm_name_from_the_request_length * 2:20\
+ public_key_algorithm_name_from_the_request_length * 2\
+ 8], 16)
public_key_blob_from_the_request = self.myresult[20 +\
public_key_algorithm_name_from_the_request_length * 2 +\
8:20 + public_key_algorithm_name_from_the_request_length\
* 2 + 8 + public_key_blob_from_the_request_length * 2]
resultlist.append((\
"public_key_algorithm_name_from_the_request_length",
str(public_key_algorithm_name_from_the_request_length)))
resultlist.append(\
("public_key_algorithm_name_from_the_request",\
self.get_ascii(\
public_key_algorithm_name_from_the_request)))
resultlist.append(\
("public_key_blob_from_the_request_length",
str(public_key_blob_from_the_request_length)))
resultlist.append(("public_key_blob_from_the_request",
self.get_ascii(public_key_blob_from_the_request)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_DEBUG"):
try:
always_display_boolean = int(self.myresult[12:14], 16)
description_length = int(self.myresult[14:22], 16)
description = self.myresult[22:22 +\
description_length * 2]
language_tag_length = int(self.myresult[22 +\
description_length * 2:22 + description_length\
* 2 + 8], 16)
language_tag = self.myresult[22 + description_length\
* 2 + 8:22 + description_length * 2 + 8 +\
language_tag_length * 2]
resultlist.append(("always_display_boolean",
always_display_boolean))
resultlist.append(("description_length",
str(description_length)))
resultlist.append(("description",
self.get_ascii(description)))
resultlist.append(("language_tag_length",
str(language_tag_length)))
resultlist.append(("language_tag",
self.get_ascii(language_tag)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_UNIMPLEMENTED"):
try:
seqn = int(self.myresult[12:20], 16)
resultlist.append(\
("packet sequence number of rejected message", seqn))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_CHANNEL_DATA"):
try:
recipient_channel = int(self.myresult[12:20], 16)
data_length = int(self.myresult[20:28], 16)
data = self.myresult[28:28 + data_length * 2]
resultlist.append(("recipient_channel", recipient_channel))
resultlist.append(("data_length", str(data_length)))
resultlist.append(\
("data", base64.standard_b64encode(self.get_ascii(data))))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_USERAUTH_REQUEST"):
try:
user_name_length = int(self.myresult[12:20], 16)
user_name = self.myresult[20:20 + user_name_length * 2]
service_name_length = int(self.myresult[20 +\
user_name_length * 2:20 + user_name_length * 2 + 8], 16)
service_name = self.myresult[20 + user_name_length *\
2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2]
method_name_length = int(self.myresult[20 +\
user_name_length * 2 + 8 + service_name_length *\
2:20 + user_name_length * 2 + 8 + service_name_length\
* 2 + 8], 16)
method_name = self.myresult[20 + user_name_length *\
2 + 8 + service_name_length * 2 + 8:20 +\
user_name_length * 2 + 8 + service_name_length *\
2 + 8 + method_name_length * 2]
resultlist.append(("user_name_length",
str(user_name_length)))
resultlist.append(("user_name",
self.get_ascii(user_name)))
resultlist.append(("service_name_length",
str(service_name_length)))
resultlist.append(("service_name",
self.get_ascii(service_name)))
resultlist.append(("method_name_length",
str(method_name_length)))
resultlist.append(("method_name",
self.get_ascii(method_name)))
if method_name.startswith("publickey"):
boolean = int(self.myresult[20 + user_name_length *\
2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2:20 + user_name_length *\
2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8], 16)
public_key_algorithm_name_length =\
int(self.myresult[20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length\
* 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length\
* 2 + 8 + 8], 16)
public_key_algorithm_name = self.myresult[20 +\
user_name_length * 2 + 8 + service_name_length *\
2 + 8 + method_name_length * 2 + 8 + 8:20 +\
user_name_length * 2 + 8 + service_name_length *\
2 + 8 + method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2]
resultlist.append(("boolean", boolean))
resultlist.append(("public_key_algorithm_name_length",
str(public_key_algorithm_name_length)))
resultlist.append(("public_key_algorithm_name",
self.get_ascii(public_key_algorithm_name)))
if boolean == 0:
public_key_blob_length =\
int(self.myresult[20 + user_name_length * 2 +\
8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2:20 +\
user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8],
16)
public_key_blob = self.myresult[20 +\
user_name_length * 2 + 8 + service_name_length *\
2 + 8 + method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8:20 +\
user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_blob_length * 2]
resultlist.append(("public_key_blob_length",
str(public_key_blob_length)))
resultlist.append(("public_key_blob",
self.get_ascii(\
public_key_blob)))
if boolean != 0:
public_key_to_be_used_for_authentication_length =\
int(self.myresult[20 + user_name_length * 2 +\
8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2:20 +\
user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8],\
16)
public_key_to_be_used_for_authentication =\
self.myresult[20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length\
* 2 + 8 + 8 + public_key_algorithm_name_length\
* 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_blob_length * 2]
signature_length = \
int(self.myresult[20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_to_be_used_for_authentication_length\
* 2:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_to_be_used_for_authentication_length\
* 2 + 8], 16)
signature = self.myresult[20 + user_name_length *\
2 + 8 + service_name_length * 2 + 8 + \
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_to_be_used_for_authentication_length\
* 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8 +\
public_key_to_be_used_for_authentication_length\
* 2 + 8 + signature_length * 2]
resultlist.append((\
"public_key_to_be_used_for_authentication_length",
str(public_key_to_be_used_for_authentication_length)))
resultlist.append((\
"public_key_to_be_used_for_authentication",
self.get_ascii(\
public_key_to_be_used_for_authentication)))
resultlist.append(("signature_length",
str(signature_length)))
resultlist.append(("signature",
self.get_ascii(signature)))
if method_name.startswith("password"):
boolean = int(self.myresult[20 + user_name_length\
* 2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2:20 + user_name_length *\
2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8], 16)
resultlist.append(("boolean", boolean))
if boolean == 0:
plaintext_password_length = int(self.myresult[\
20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length\
* 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8], 16)
plaintext_password = self.myresult[20 +\
user_name_length * 2 + 8 + service_name_length\
* 2 + 8 + method_name_length * 2 + 8 + 8:20 +\
user_name_length * 2 + 8 + service_name_length\
* 2 + 8 + method_name_length * 2 + 8 + 8 +\
plaintext_password_length * 2]
resultlist.append(("plaintext_password_length",
str(plaintext_password_length)))
resultlist.append(("plaintext_password",
self.get_ascii(plaintext_password)))
if boolean != 0:
plaintext_old_password_length =\
int(self.myresult[20 + user_name_length * 2 +\
8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8:20 +\
user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8], 16)
plaintext_old_password = self.myresult[\
20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8:20 +\
user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
plaintext_old_password_length * 2]
plaintext_new_password_length = \
int(self.myresult[20 + user_name_length * 2 +\
8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
plaintext_old_password_length * 2:20\
+ user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
plaintext_old_password_length * 2 + 8], 16)
plaintext_new_password = self.myresult[\
20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length\
* 2 + 8 + 8 + plaintext_old_password_length\
* 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
plaintext_old_password_length * 2 + 8 +\
plaintext_new_password_length * 2]
resultlist.append(("plaintext_old_password_length",
str(plaintext_old_password_length)))
resultlist.append(("plaintext_old_password",
plaintext_old_password))
resultlist.append(("plaintext_new_password_length",
str(plaintext_new_password_length)))
resultlist.append(("plaintext_new_password",
self.get_ascii(plaintext_new_password)))
if method_name.startswith("hostbased"):
# hostbased method-specific fields follow the user name, service
# name and method name fields, like the other authentication methods
base = 20 + user_name_length * 2 + 8 + service_name_length * 2 +\
8 + method_name_length * 2
public_key_algorithm_for_host_key_length =\
int(self.myresult[base:base + 8], 16)
public_key_algorithm_for_host_key = self.myresult[base + 8:\
base + 8 + public_key_algorithm_for_host_key_length * 2]
base = base + 8 + public_key_algorithm_for_host_key_length * 2
public_host_key_and_cert_for_client_host_len =\
int(self.myresult[base:base + 8], 16)
public_host_key_and_certificates_for_client_host =\
self.myresult[base + 8:base + 8 +\
public_host_key_and_cert_for_client_host_len * 2]
base = base + 8 + public_host_key_and_cert_for_client_host_len * 2
client_host_name_length = int(self.myresult[base:base + 8], 16)
client_host_name = self.myresult[base + 8:base + 8 +\
client_host_name_length * 2]
base = base + 8 + client_host_name_length * 2
user_name_on_the_client_host_length =\
int(self.myresult[base:base + 8], 16)
user_name_on_the_client_host = self.myresult[base + 8:base + 8 +\
user_name_on_the_client_host_length * 2]
base = base + 8 + user_name_on_the_client_host_length * 2
signature_length = int(self.myresult[base:base + 8], 16)
signature = self.myresult[base + 8:base + 8 + signature_length * 2]
resultlist.append(("public_key_algorithm_for\
_host_key_length",
str(public_key_algorithm_for_host_key_length)))
resultlist.append(("public_key_algorithm_for_host_key",
self.get_ascii(public_key_algorithm_for_host_key)))
resultlist.append(\
("public_host_key_and_certificates_for_client_host_length",
str(\
public_host_key_and_cert_for_client_host_len)))
resultlist.append(\
("public_host_key_and_certificates_for_client_host",
self.get_ascii(\
public_host_key_and_certificates_for_client_host)))
resultlist.append(("client_host_name_length",
str(client_host_name_length)))
resultlist.append(("client_host_name",
self.get_ascii(client_host_name)))
resultlist.append(\
("user_name_on_the_client_host_length",\
str(user_name_on_the_client_host_length)))
resultlist.append(("user_name_on_the_client_host",
self.get_ascii(user_name_on_the_client_host)))
resultlist.append(("signature_length",
str(signature_length)))
resultlist.append(("signature",
self.get_ascii(signature)))
else:
method_specific_fields_length = int(self.myresult[\
20 + user_name_length * 2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length * 2 + 8], 16)
method_specific_fields = self.myresult[\
20 + user_name_length * 2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length * 2 + 8 +\
method_specific_fields_length * 2]
resultlist.append(("method_specific_fields_length",
str(method_specific_fields_length)))
resultlist.append(("method_specific_fields",
self.get_ascii(method_specific_fields)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_USERAUTH_FAILURE"):
try:
authentications_that_can_continue_length =\
int(self.myresult[12:20], 16)
authentications_that_can_continue =\
self.myresult[20:20 + authentications_that_can_continue_length * 2]
partial_success_boolean = int(self.myresult[20 +\
authentications_that_can_continue_length * 2:20 +\
authentications_that_can_continue_length * 2 + 8], 16)
resultlist.append(\
("authentications_that_can_continue_length",
str(authentications_that_can_continue_length)))
resultlist.append(("authentications_that_can_continue",
authentications_that_can_continue))
resultlist.append(("partial_success_boolean",
partial_success_boolean))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_USERAUTH_BANNER"):
try:
message_length = int(self.myresult[12:20], 16)
message = self.myresult[20:20 + message_length * 2]
language_tag_length = int(self.myresult[20 +\
message_length * 2:20 + message_length * 2 + 8], 16)
language_tag = self.myresult[20 + message_length * 2\
+ 8:20 + message_length * 2 + 8 + language_tag_length * 2]
resultlist.append(("message_length", str(message_length)))
resultlist.append(("message", self.get_ascii(message)))
resultlist.append(("language_tag_length",
str(language_tag_length)))
resultlist.append(("language_tag",
self.get_ascii(language_tag)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_KEXINIT"):
try:
cookie = base64.standard_b64encode(self.myresult[12:44])
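# after the sixteen-byte random cookie come ten name-lists, a
# first_kex_packet_follows boolean and a reserved uint32 (RFC 4253)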
kex_algorithms_length = int(self.myresult[44:52], 16)
kex_algorithms = self.get_ascii(self.myresult[52:52 +\
kex_algorithms_length * 2])
server_host_key_algorithms_length = int(self.myresult[52 +\
kex_algorithms_length * 2:52 + kex_algorithms_length\
* 2 + 8], 16)
server_host_key_algorithms = self.get_ascii(self.myresult[\
52 + kex_algorithms_length * 2 + 8:52 +\
kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2])
encryption_algorithms_client_to_server_length = int(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2:52 +\
kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8], 16)
encryption_algorithms_client_to_server = self.get_ascii(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8:52 +\
kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2])
encryption_algorithms_server_to_client_length = int(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2:52 +\
kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length *\
2 + 8], 16)
encryption_algorithms_server_to_client = self.get_ascii(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 +\
8:52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 +\
8 + encryption_algorithms_server_to_client_length\
* 2])
mac_algorithms_client_to_server_length = int(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2:52 +\
kex_algorithms_length * 2 + 8 + server_host_key_algorithms_length\
* 2 + 8 + encryption_algorithms_client_to_server_length * 2 +\
8 + encryption_algorithms_server_to_client_length * 2 + 8], 16)
mac_algorithms_client_to_server = self.get_ascii(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8:52 +\
kex_algorithms_length * 2 + 8 + server_host_key_algorithms_length\
* 2 + 8 + encryption_algorithms_client_to_server_length * 2 +\
8 + encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2])
mac_algorithms_server_to_client_length = int(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2:52 +\
kex_algorithms_length * 2 + 8 + server_host_key_algorithms_length\
* 2 + 8 + encryption_algorithms_client_to_server_length * 2 +\
8 + encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8], 16)
mac_algorithms_server_to_client = self.get_ascii(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8:52 +\
kex_algorithms_length * 2 + 8 + server_host_key_algorithms_length\
* 2 + 8 + encryption_algorithms_client_to_server_length * 2 +\
8 + encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2])
compression_algorithms_client_to_server_length =\
int(self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2:52 +\
kex_algorithms_length * 2 + 8 + server_host_key_algorithms_length *\
2 + 8 + encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8], 16)
compression_algorithms_client_to_server = self.get_ascii(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8:52 +\
kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length\
* 2 + 8 + mac_algorithms_client_to_server_length\
* 2 + 8 + mac_algorithms_server_to_client_length\
* 2 + 8 + \
compression_algorithms_client_to_server_length * 2])
compression_algorithms_server_to_client_length = int(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8 +\
compression_algorithms_client_to_server_length * 2:52 +\
kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8 +\
compression_algorithms_client_to_server_length * 2 + 8], 16)
compression_algorithms_server_to_client = self.get_ascii(\
self.myresult[52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8 +\
compression_algorithms_client_to_server_length * 2 + 8:52 +\
kex_algorithms_length * 2 + 8 + \
server_host_key_algorithms_length * 2 +\
8 + encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8 +\
compression_algorithms_client_to_server_length * 2 + 8 +\
compression_algorithms_server_to_client_length * 2])
languages_client_to_server_length = int(self.myresult[\
52 + kex_algorithms_length * 2 + 8 + server_host_key_algorithms_length\
* 2 + 8 + encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8 +\
compression_algorithms_client_to_server_length * 2 + 8 +\
compression_algorithms_server_to_client_length * 2:52 +\
kex_algorithms_length * 2 + 8 + server_host_key_algorithms_length\
* 2 + 8 + encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8 +\
compression_algorithms_client_to_server_length * 2 + 8 +\
compression_algorithms_server_to_client_length * 2 + 8], 16)
languages_client_to_server = self.get_ascii(self.myresult[\
52 + kex_algorithms_length * 2 + 8 + server_host_key_algorithms_length\
* 2 + 8 + encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8 +\
compression_algorithms_client_to_server_length * 2 + 8 +\
compression_algorithms_server_to_client_length * 2 + 8:52 +\
kex_algorithms_length * 2 + 8 + server_host_key_algorithms_length\
* 2 + 8 + encryption_algorithms_client_to_server_length * 2 +\
8 + encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8 +\
compression_algorithms_client_to_server_length *\
2 + 8 + compression_algorithms_server_to_client_length *\
2 + 8 + languages_client_to_server_length * 2])
# parse the remaining KEXINIT fields with a running offset, counted
# in hex characters from the end of languages_client_to_server
offset = 52 + kex_algorithms_length * 2 + 8 +\
server_host_key_algorithms_length * 2 + 8 +\
encryption_algorithms_client_to_server_length * 2 + 8 +\
encryption_algorithms_server_to_client_length * 2 + 8 +\
mac_algorithms_client_to_server_length * 2 + 8 +\
mac_algorithms_server_to_client_length * 2 + 8 +\
compression_algorithms_client_to_server_length * 2 + 8 +\
compression_algorithms_server_to_client_length * 2 + 8 +\
languages_client_to_server_length * 2
languages_server_to_client_length =\
int(self.myresult[offset:offset + 8], 16)
languages_server_to_client = self.get_ascii(\
self.myresult[offset + 8:offset + 8 +\
languages_server_to_client_length * 2])
offset = offset + 8 + languages_server_to_client_length * 2
first_kex_packet_follows_boolean =\
self.myresult[offset:offset + 2]
reserved = self.myresult[offset + 2:offset + 2 + 8]
ctosmac = mac_algorithms_client_to_server.split(",")
stocmac = mac_algorithms_server_to_client.split(",")
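# find a MAC algorithm offered in both directions; create_session
# records 20 for the HMAC-SHA1 family, 16 for HMAC-MD5 and 0 for "none"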
i = 0
while i < len(ctosmac):
# reset j on every outer pass so each client-to-server algorithm
# is compared against the whole server-to-client list
j = 0
while j < len(stocmac):
if ctosmac[i].startswith(stocmac[j]):
if ctosmac[i].startswith("hmac-sha1"):
create_session(\
pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"], 20)
if ctosmac[i].startswith("hmac-sha1-96"):
create_session(\
pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"], 20)
if ctosmac[i].startswith("hmac-md5"):
create_session(\
pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"], 16)
if ctosmac[i].startswith("hmac-md5-96"):
create_session(\
pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"], 16)
if ctosmac[i].startswith("none"):
create_session(\
pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"], 0)
j = j + 1
i = i + 1
resultlist.append(("cookie", cookie))
resultlist.append(\
("kex_algorithms_length", str(kex_algorithms_length)))
resultlist.append(("kex_algorithms", kex_algorithms))
resultlist.append(\
("server_host_key_algorithms_length",\
str(server_host_key_algorithms_length)))
resultlist.append(\
("server_host_key_algorithms", server_host_key_algorithms))
resultlist.append(\
("encryption_algorithms_client_to_server_length",\
str(encryption_algorithms_client_to_server_length)))
resultlist.append(\
("encryption_algorithms_client_to_server",\
encryption_algorithms_client_to_server))
resultlist.append(\
("encryption_algorithms_server_to_client_length",\
str(encryption_algorithms_server_to_client_length)))
resultlist.append(\
("encryption_algorithms_server_to_client",\
encryption_algorithms_server_to_client))
resultlist.append(\
("mac_algorithms_client_to_server_length",\
str(mac_algorithms_client_to_server_length)))
resultlist.append(\
("mac_algorithms_client_to_server",\
mac_algorithms_client_to_server))
resultlist.append(\
("mac_algorithms_server_to_client_length",\
str(mac_algorithms_server_to_client_length)))
resultlist.append(("mac_algorithms_server_to_client",
mac_algorithms_server_to_client))
resultlist.append(\
("compression_algorithms_client_to_server_length", str(\
compression_algorithms_client_to_server_length)))
resultlist.append(\
("compression_algorithms_client_to_server",\
compression_algorithms_client_to_server))
resultlist.append(\
("compression_algorithms_server_to_client_length", str(\
compression_algorithms_server_to_client_length)))
resultlist.append(\
("compression_algorithms_server_to_client",\
compression_algorithms_server_to_client))
resultlist.append(("languages_client_to_server_length",
str(languages_client_to_server_length)))
resultlist.append(("languages_client_to_server",
languages_client_to_server))
resultlist.append(("languages_server_to_client_length",
str(languages_server_to_client_length)))
resultlist.append(("languages_server_to_client",
languages_server_to_client))
resultlist.append(("first_kex_packet_follows_boolean",
first_kex_packet_follows_boolean))
resultlist.append(("reserved", reserved))
self.found = True
except Exception:
pass
if not self.found and not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"]):
payload = base64.standard_b64encode(\
self.get_ascii(self.myresult[12:payloadl * 2]))
resultlist.append(("payload", payload))
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]):
resultlist.append(("padding", padding))
if len(self.myresult) > (10 + payloadl * 2 + int(padl) * 2):
resultlist.append(("MAC", self.myresult[10 + payloadl *\
2 + int(padl) * 2:]))
result_str = ""
for item in resultlist:
if len(result_str) == 0:
result_str = item[0] + ": " + item[1]
else:
result_str = result_str + ", " + item[0] + ": " + item[1]
return "", result_str
return "", ""
class SSH(Packet):
"""
class for handling the SSH packets
@attention: this class inherits Packet
"""
name = "ssh"
fields_desc = [SSHField("sshpayload", "")]
bind_layers(TCP, SSH, dport=22)
bind_layers(TCP, SSH, sport=22)
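The dissector above hand-inlines every slice bound; since self.myresult holds the packet as a hex string, one byte is two characters and each four-byte length prefix is eight. A compact helper expressing the same convention might look like this (read_ssh_string, buf and payload_hex are illustrative names, not part of the original class):

def read_ssh_string(buf, offset):
    # buf is a hex-encoded SSH payload; offset counts hex characters
    length = int(buf[offset:offset + 8], 16)  # four-byte big-endian length
    start = offset + 8
    # return the field's hex data and the offset of the next field
    return buf[start:start + length * 2], start + length * 2

# usage sketch: walk the three leading strings of a USERAUTH_REQUEST
# payload (user name, service name, method name), which start at
# hex offset 12, right after the message code
# user_name, off = read_ssh_string(payload_hex, 12)
# service_name, off = read_ssh_string(payload_hex, off)
# method_name, off = read_ssh_string(payload_hex, off)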
| 52.318065
| 79
| 0.538308
| 8,463
| 81,093
| 4.784001
| 0.043247
| 0.087312
| 0.08457
| 0.039124
| 0.851113
| 0.809544
| 0.770494
| 0.723911
| 0.702102
| 0.676341
| 0
| 0.034843
| 0.378874
| 81,093
| 1,549
| 80
| 52.35184
| 0.768965
| 0.051484
| 0
| 0.549211
| 0
| 0
| 0.059819
| 0.037685
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015026
| false
| 0.019534
| 0.003005
| 0
| 0.049587
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0233c3488fd9dccd4d51d85f3b620c8d634b6e50
| 124
|
py
|
Python
|
src/texttest/repo_checks/test_gocdrepos.py
|
pagero/gocd-pipeline-builder
|
6db292757f15583438c2afe5b8303398629ef585
|
[
"MIT"
] | 12
|
2016-01-21T21:37:17.000Z
|
2021-08-13T20:24:37.000Z
|
src/texttest/repo_checks/test_gocdrepos.py
|
pagero/gocd-pipeline-builder
|
6db292757f15583438c2afe5b8303398629ef585
|
[
"MIT"
] | 1
|
2017-03-14T13:02:28.000Z
|
2017-03-14T13:02:28.000Z
|
src/texttest/repo_checks/test_gocdrepos.py
|
pagero/gocd-pipeline-builder
|
6db292757f15583438c2afe5b8303398629ef585
|
[
"MIT"
] | 5
|
2015-09-23T09:17:22.000Z
|
2019-10-07T12:32:18.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from gocdpb import gocdpb
import sys
gocdpb.repos(sys.argv)
| 17.714286
| 38
| 0.758065
| 18
| 124
| 4.944444
| 0.611111
| 0.269663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0.137097
| 124
| 6
| 39
| 20.666667
| 0.82243
| 0.169355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
026f2ed903657bbd43cd041ed295e1916f2af061
| 115
|
py
|
Python
|
app/util/__init__.py
|
TIHLDE/Lepton
|
60ec0793381f1c1b222f305586e8c2d4345fb566
|
[
"MIT"
] | 7
|
2021-03-04T18:49:12.000Z
|
2021-03-08T18:25:51.000Z
|
app/util/__init__.py
|
TIHLDE/Lepton
|
60ec0793381f1c1b222f305586e8c2d4345fb566
|
[
"MIT"
] | 251
|
2021-03-04T19:19:14.000Z
|
2022-03-31T14:47:53.000Z
|
app/util/__init__.py
|
tihlde/Lepton
|
5cab3522c421b76373a5c25f49267cfaef7b826a
|
[
"MIT"
] | 3
|
2021-10-05T19:03:04.000Z
|
2022-02-25T13:32:09.000Z
|
from app.util.enum_utils import EnumUtils
from app.util.utils import now, yesterday, disable_for_loaddata, week_nr
| 38.333333
| 72
| 0.843478
| 19
| 115
| 4.894737
| 0.736842
| 0.150538
| 0.236559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095652
| 115
| 2
| 73
| 57.5
| 0.894231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
027d009ac98880f7d298de5af5c096ca8e670643
| 45
|
py
|
Python
|
stonkclonk/__init__.py
|
MLH-Fellowship/kk-r1-orientation
|
d4f5f32c36a0d40a339798b7403c2be7f6e1cc93
|
[
"MIT"
] | 1
|
2021-09-08T11:59:25.000Z
|
2021-09-08T11:59:25.000Z
|
stonkclonk/__init__.py
|
MLH-Fellowship/stonk-clonk
|
d4f5f32c36a0d40a339798b7403c2be7f6e1cc93
|
[
"MIT"
] | 3
|
2021-02-07T18:35:38.000Z
|
2021-02-08T19:21:34.000Z
|
stonkclonk/__init__.py
|
MLH-Fellowship/stonk-clonk
|
d4f5f32c36a0d40a339798b7403c2be7f6e1cc93
|
[
"MIT"
] | null | null | null |
from stonkclonk.stonkclonk import StonkClonk
| 22.5
| 44
| 0.888889
| 5
| 45
| 8
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0283a82a2d5783d4ec978eccfd98572cda4e2201
| 406
|
py
|
Python
|
cedar/cedar_settings.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | null | null | null |
cedar/cedar_settings.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | 11
|
2020-03-24T15:29:46.000Z
|
2022-03-11T23:14:48.000Z
|
cedar/cedar_settings.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | null | null | null |
from cedar_settings.default_settings import default_settings
from django.contrib.staticfiles.templatetags.staticfiles import static
default_settings['cedar__default_support_url'] = ('text', 'http://www.cedarbox.ca/support/')
default_settings['cedar__default_splash_page_background_img'] = ('text', static('css/cedar8_background.jpg'))
default_settings['cedar__default_datepicker_years'] = ('int', 300)
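Each line registers a (type, value) pair under a settings key. Assuming default_settings supports plain mapping lookup (not confirmed by this file), a registered default could be read back like this:

setting_type, value = default_settings['cedar__default_datepicker_years']
# setting_type == 'int', value == 300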
| 40.6
| 109
| 0.817734
| 50
| 406
| 6.22
| 0.56
| 0.241158
| 0.192926
| 0.26045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010471
| 0.059113
| 406
| 9
| 110
| 45.111111
| 0.803665
| 0
| 0
| 0
| 0
| 0
| 0.406404
| 0.302956
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ce4ad5c39f175d199e98d62a509e2bbdb108efa2
| 16,495
|
py
|
Python
|
ee/api/test/test_property_definition.py
|
ld-rale/posthog
|
0fa5b18b2e940cf5cdbe8afc733eb7e3cd4ae810
|
[
"MIT"
] | null | null | null |
ee/api/test/test_property_definition.py
|
ld-rale/posthog
|
0fa5b18b2e940cf5cdbe8afc733eb7e3cd4ae810
|
[
"MIT"
] | null | null | null |
ee/api/test/test_property_definition.py
|
ld-rale/posthog
|
0fa5b18b2e940cf5cdbe8afc733eb7e3cd4ae810
|
[
"MIT"
] | null | null | null |
import urllib.parse
from typing import cast
import pytest
from django.db.utils import IntegrityError
from django.utils import timezone
from rest_framework import status
from ee.models.license import License, LicenseManager
from ee.models.property_definition import EnterprisePropertyDefinition
from posthog.models import EventProperty, Tag
from posthog.models.property_definition import PropertyDefinition
from posthog.test.base import APIBaseTest
class TestPropertyDefinitionEnterpriseAPI(APIBaseTest):
def test_can_set_and_query_property_type_and_format(self):
property = EnterprisePropertyDefinition.objects.create(
team=self.team, name="a timestamp", property_type="DateTime",
)
response = self.client.get(f"/api/projects/@current/property_definitions/{property.id}")
self.assertEqual(response.status_code, status.HTTP_200_OK)
assert response.json()["property_type"] == "DateTime"
query_list_response = self.client.get(f"/api/projects/@current/property_definitions")
self.assertEqual(query_list_response.status_code, status.HTTP_200_OK)
matches = [p["name"] for p in query_list_response.json()["results"] if p["name"] == "a timestamp"]
assert len(matches) == 1
def test_errors_on_invalid_property_type(self):
with pytest.raises(IntegrityError):
EnterprisePropertyDefinition.objects.create(
team=self.team, name="a timestamp", property_type="not an allowed option",
)
def test_retrieve_existing_property_definition(self):
super(LicenseManager, cast(LicenseManager, License.objects)).create(
plan="enterprise", valid_until=timezone.datetime(2500, 1, 19, 3, 14, 7)
)
property = EnterprisePropertyDefinition.objects.create(team=self.team, name="enterprise property")
tag = Tag.objects.create(name="deprecated", team_id=self.team.id)
property.tagged_items.create(tag_id=tag.id)
response = self.client.get(f"/api/projects/@current/property_definitions/{property.id}")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(response_data["name"], "enterprise property")
self.assertEqual(response_data["description"], "")
self.assertEqual(response_data["tags"], ["deprecated"])
def test_retrieve_create_property_definition(self):
super(LicenseManager, cast(LicenseManager, License.objects)).create(
plan="enterprise", valid_until=timezone.datetime(2500, 1, 19, 3, 14, 7)
)
property = PropertyDefinition.objects.create(team=self.team, name="property")
response = self.client.get(f"/api/projects/@current/property_definitions/{property.id}")
self.assertEqual(response.status_code, status.HTTP_200_OK)
enterprise_property = EnterprisePropertyDefinition.objects.all().first()
property.refresh_from_db()
self.assertEqual(enterprise_property.propertydefinition_ptr_id, property.id) # type: ignore
self.assertEqual(enterprise_property.name, property.name) # type: ignore
self.assertEqual(enterprise_property.team.id, property.team.id) # type: ignore
def test_search_property_definition(self):
super(LicenseManager, cast(LicenseManager, License.objects)).create(
plan="enterprise", valid_until=timezone.datetime(2500, 1, 19, 3, 14, 7)
)
tag = Tag.objects.create(name="deprecated", team_id=self.team.id)
EventProperty.objects.create(team=self.team, event="$pageview", property="enterprise property")
enterprise_property = EnterprisePropertyDefinition.objects.create(
team=self.team, name="enterprise property", description=""
)
enterprise_property.tagged_items.create(tag_id=tag.id)
other_property = EnterprisePropertyDefinition.objects.create(
team=self.team, name="other property", description=""
)
other_property.tagged_items.create(tag_id=tag.id)
set_property = EnterprisePropertyDefinition.objects.create(team=self.team, name="$set", description="")
set_property.tagged_items.create(tag_id=tag.id)
response = self.client.get(f"/api/projects/@current/property_definitions/?search=enter")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(len(response_data["results"]), 1)
self.assertEqual(response_data["results"][0]["name"], "enterprise property")
self.assertEqual(response_data["results"][0]["description"], "")
self.assertEqual(response_data["results"][0]["tags"], ["deprecated"])
response = self.client.get(f"/api/projects/@current/property_definitions/?search=enterprise")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(len(response_data["results"]), 1)
# always True if not scoping by event names
self.assertEqual(response_data["results"][0]["is_event_property"], None)
# add event_names=['$pageview'] to get properties that have been seen by this event
response = self.client.get(
f"/api/projects/@current/property_definitions/?search=property&event_names=%5B%22%24pageview%22%5D"
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(len(response_data["results"]), 2)
self.assertEqual(response_data["results"][0]["name"], "enterprise property")
self.assertEqual(response_data["results"][0]["is_event_property"], True)
self.assertEqual(response_data["results"][1]["name"], "other property")
self.assertEqual(response_data["results"][1]["is_event_property"], False)
response = self.client.get(f"/api/projects/@current/property_definitions/?search=er pr")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(len(response_data["results"]), 2)
response = self.client.get(f"/api/projects/@current/property_definitions/?search=bust")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(len(response_data["results"]), 0)
response = self.client.get(f"/api/projects/@current/property_definitions/?search=set")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(len(response_data["results"]), 0)
response = self.client.get(f"/api/projects/@current/property_definitions/?search=")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(len(response_data["results"]), 2)
def test_update_property_definition(self):
super(LicenseManager, cast(LicenseManager, License.objects)).create(
plan="enterprise", valid_until=timezone.datetime(2038, 1, 19, 3, 14, 7)
)
property = EnterprisePropertyDefinition.objects.create(team=self.team, name="enterprise property")
response = self.client.patch(
f"/api/projects/@current/property_definitions/{str(property.id)}/",
{"description": "This is a description.", "tags": ["official", "internal"],},
)
response_data = response.json()
self.assertEqual(response_data["description"], "This is a description.")
self.assertEqual(response_data["updated_by"]["first_name"], self.user.first_name)
self.assertEqual(set(response_data["tags"]), {"official", "internal"})
property.refresh_from_db()
self.assertEqual(set(property.tagged_items.values_list("tag__name", flat=True)), {"official", "internal"})
def test_update_property_without_license(self):
property = EnterprisePropertyDefinition.objects.create(team=self.team, name="enterprise property")
response = self.client.patch(
f"/api/projects/@current/property_definitions/{str(property.id)}/", data={"description": "test"},
)
self.assertEqual(response.status_code, status.HTTP_402_PAYMENT_REQUIRED)
self.assertIn("This feature is part of the premium PostHog offering.", response.json()["detail"])
def test_with_expired_license(self):
super(LicenseManager, cast(LicenseManager, License.objects)).create(
plan="enterprise", valid_until=timezone.datetime(2010, 1, 19, 3, 14, 7)
)
property = EnterprisePropertyDefinition.objects.create(team=self.team, name="description test")
response = self.client.patch(
f"/api/projects/@current/property_definitions/{str(property.id)}/", data={"description": "test"},
)
self.assertEqual(response.status_code, status.HTTP_402_PAYMENT_REQUIRED)
self.assertIn("This feature is part of the premium PostHog offering.", response.json()["detail"])
def test_filter_property_definitions(self):
super(LicenseManager, cast(LicenseManager, License.objects)).create(
plan="enterprise", valid_until=timezone.datetime(2500, 1, 19, 3, 14, 7)
)
EnterprisePropertyDefinition.objects.create(team=self.team, name="plan")
EnterprisePropertyDefinition.objects.create(team=self.team, name="purchase")
EnterprisePropertyDefinition.objects.create(team=self.team, name="app_rating")
response = self.client.get("/api/projects/@current/property_definitions/?properties=plan,app_rating")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["count"], 2)
for item in response.json()["results"]:
self.assertIn(item["name"], ["plan", "app_rating"])
def test_event_property_definition_no_duplicate_tags(self):
from ee.models.license import License, LicenseManager
super(LicenseManager, cast(LicenseManager, License.objects)).create(
key="key_123", plan="enterprise", valid_until=timezone.datetime(2038, 1, 19, 3, 14, 7), max_users=3,
)
property = EnterprisePropertyDefinition.objects.create(team=self.team, name="description test")
response = self.client.patch(
f"/api/projects/@current/property_definitions/{str(property.id)}/", data={"tags": ["a", "b", "a"]},
)
self.assertListEqual(sorted(response.json()["tags"]), ["a", "b"])
def test_order_ids_first_filter(self):
super(LicenseManager, cast(LicenseManager, License.objects)).create(
plan="enterprise", valid_until=timezone.datetime(2010, 1, 19, 3, 14, 7)
)
# is_first_movie, first_visit
is_first_movie_property = EnterprisePropertyDefinition.objects.create(team=self.team, name="is_first_movie")
first_visit_property = EnterprisePropertyDefinition.objects.create(team=self.team, name="first_visit")
ids = [is_first_movie_property.id, first_visit_property.id]
response = self.client.get("/api/projects/@current/property_definitions/?search=firs")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["count"], 2) # first_visit, is_first_movie
self.assertEqual(response.json()["results"][0]["name"], "first_visit")
self.assertEqual(response.json()["results"][1]["name"], "is_first_movie")
order_ids_first_str = f'["{str(ids[0])}"]'
response = self.client.get(
f'/api/projects/@current/property_definitions/?search=firs&{urllib.parse.urlencode({"order_ids_first": order_ids_first_str})}'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["count"], 2)
self.assertEqual(response.json()["results"][0]["id"], str(ids[0])) # Test that included id is first item
self.assertEqual(response.json()["results"][0]["name"], "is_first_movie")
response = self.client.get(
f'/api/projects/@current/property_definitions/?search=firs&{urllib.parse.urlencode({"order_ids_first": []})}'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["count"], 2) # first_visit, is_first_movie
self.assertEqual(response.json()["results"][0]["name"], "first_visit")
self.assertEqual(response.json()["results"][1]["name"], "is_first_movie")
def test_excluded_ids_filter(self):
super(LicenseManager, cast(LicenseManager, License.objects)).create(
plan="enterprise", valid_until=timezone.datetime(2010, 1, 19, 3, 14, 7)
)
# is_first_movie, first_visit
is_first_movie_property = EnterprisePropertyDefinition.objects.create(team=self.team, name="is_first_movie")
first_visit_property = EnterprisePropertyDefinition.objects.create(team=self.team, name="first_visit")
ids = [is_first_movie_property.id, first_visit_property.id]
response = self.client.get("/api/projects/@current/property_definitions/?search=firs")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["count"], 2) # first_visit, is_first_movie
self.assertEqual(response.json()["results"][0]["name"], "first_visit")
self.assertEqual(response.json()["results"][1]["name"], "is_first_movie")
excluded_ids_str = f'["{str(ids[0])}"]'
response = self.client.get(
f'/api/projects/@current/property_definitions/?search=firs&{urllib.parse.urlencode({"excluded_ids": excluded_ids_str})}'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["count"], 1)
self.assertEqual(response.json()["results"][0]["id"], str(ids[1]))
self.assertEqual(response.json()["results"][0]["name"], "first_visit")
response = self.client.get(
f'/api/projects/@current/property_definitions/?search=firs&{urllib.parse.urlencode({"excluded_ids": []})}'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["count"], 2) # first_visit, is_first_movie
self.assertEqual(response.json()["results"][0]["name"], "first_visit")
self.assertEqual(response.json()["results"][1]["name"], "is_first_movie")
def test_order_ids_first_overrides_excluded_ids_filter(self):
super(LicenseManager, cast(LicenseManager, License.objects)).create(
plan="enterprise", valid_until=timezone.datetime(2010, 1, 19, 3, 14, 7)
)
# is_first_movie, first_visit
is_first_movie_property = EnterprisePropertyDefinition.objects.create(team=self.team, name="is_first_movie")
first_visit_property = EnterprisePropertyDefinition.objects.create(team=self.team, name="first_visit")
ids = [is_first_movie_property.id, first_visit_property.id]
response = self.client.get("/api/projects/@current/property_definitions/?search=firs")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["count"], 2) # first_visit, is_first_movie
self.assertEqual(response.json()["results"][0]["name"], "first_visit")
self.assertEqual(response.json()["results"][1]["name"], "is_first_movie")
ids_str = f'["{str(ids[0])}"]'
response = self.client.get(
f'/api/projects/@current/property_definitions/?search=firs&{urllib.parse.urlencode({"excluded_ids": ids_str, "order_ids_first": ids_str})}'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["count"], 2)
self.assertEqual(response.json()["results"][0]["id"], str(ids[0]))
self.assertEqual(response.json()["results"][0]["name"], "is_first_movie")
response = self.client.get(
f'/api/projects/@current/property_definitions/?search=firs&{urllib.parse.urlencode({"excluded_ids": [], "order_ids_first": []})}'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["count"], 2) # first_visit, is_first_movie
self.assertEqual(response.json()["results"][0]["name"], "first_visit")
self.assertEqual(response.json()["results"][1]["name"], "is_first_movie")
| 57.274306
| 151
| 0.693362
| 1,941
| 16,495
| 5.704276
| 0.093251
| 0.102962
| 0.130871
| 0.06828
| 0.831828
| 0.814487
| 0.787392
| 0.750903
| 0.72715
| 0.706286
| 0
| 0.01754
| 0.16702
| 16,495
| 287
| 152
| 57.473868
| 0.788282
| 0.027281
| 0
| 0.516529
| 0
| 0.028926
| 0.215048
| 0.10899
| 0
| 0
| 0
| 0
| 0.338843
| 1
| 0.053719
| false
| 0
| 0.049587
| 0
| 0.107438
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ce7f099ef37b6fb26a3014bad92553275b1aedbb
| 117
|
py
|
Python
|
tests/test_poly.py
|
appeltel/smath2018b
|
713ab0c14e86e2b028efb7f29156216226cb9aa2
|
[
"MIT"
] | null | null | null |
tests/test_poly.py
|
appeltel/smath2018b
|
713ab0c14e86e2b028efb7f29156216226cb9aa2
|
[
"MIT"
] | null | null | null |
tests/test_poly.py
|
appeltel/smath2018b
|
713ab0c14e86e2b028efb7f29156216226cb9aa2
|
[
"MIT"
] | null | null | null |
"""
Tests for the poly module
"""
from smath2018b.poly import square
def test_square():
assert square(4) == 16
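The poly module itself is not part of this file; an implementation consistent with the assertion above could be as small as:

def square(x):
    # return x multiplied by itself, so square(4) == 16
    return x * x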
| 13
| 34
| 0.683761
| 17
| 117
| 4.647059
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074468
| 0.196581
| 117
| 8
| 35
| 14.625
| 0.765957
| 0.213675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ced0c9214adf612de961eeb0c70c0f1144ab432d
| 578
|
py
|
Python
|
ds2/disjointsets/__init__.py
|
aslisabanci/datastructures
|
f7952801245bc8d386a03d92a38121f558bdacca
|
[
"MIT"
] | 159
|
2017-10-02T22:03:14.000Z
|
2022-03-10T23:02:22.000Z
|
ds2/disjointsets/__init__.py
|
aslisabanci/datastructures
|
f7952801245bc8d386a03d92a38121f558bdacca
|
[
"MIT"
] | 9
|
2019-02-04T14:55:09.000Z
|
2021-06-05T13:30:28.000Z
|
ds2/disjointsets/__init__.py
|
aslisabanci/datastructures
|
f7952801245bc8d386a03d92a38121f558bdacca
|
[
"MIT"
] | 49
|
2017-09-29T17:51:16.000Z
|
2022-03-10T23:12:17.000Z
|
from ds2.disjointsets.disjointsets import ( DisjointSetsMapping,
DisjointSetsLabels,
DisjointSetsForest,
DisjointSetsPathCompression,
DisjointSetsTwoPassPC,
DisjointSetsMergeByHeight,
DisjointSetsMergeByWeight,
DisjointSets
)
| 57.8
| 72
| 0.349481
| 13
| 578
| 15.538462
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004695
| 0.631488
| 578
| 9
| 73
| 64.222222
| 0.943662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.111111
| 0.111111
| 0
| 0.111111
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
0c66c2b32fed219c563c0f8fe8737923cc3c43fe
| 78
|
py
|
Python
|
tests/integration/testdata/buildcmd/Provided/main.py
|
renanmontebelo/aws-sam-cli
|
b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 2,959
|
2018-05-08T21:48:56.000Z
|
2020-08-24T14:35:39.000Z
|
tests/integration/testdata/buildcmd/Provided/main.py
|
renanmontebelo/aws-sam-cli
|
b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1,469
|
2018-05-08T22:44:28.000Z
|
2020-08-24T20:19:24.000Z
|
tests/integration/testdata/buildcmd/Provided/main.py
|
renanmontebelo/aws-sam-cli
|
b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 642
|
2018-05-08T22:09:19.000Z
|
2020-08-17T09:04:37.000Z
|
import requests
def handler(event, context):
return requests.__version__
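A quick way to exercise this Lambda-style handler outside AWS is to call it directly; the empty event and None context below are stand-in arguments, not values from the test suite:

if __name__ == "__main__":
    print(handler({}, None))  # prints the bundled requests version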
| 15.6
| 31
| 0.782051
| 9
| 78
| 6.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 78
| 4
| 32
| 19.5
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
0c854d40c9b64db59e0fdb632d28f7965488f92e
| 113
|
py
|
Python
|
unclassifiers/config.py
|
infralight/terraform-unclassifier
|
edb81b86f1c53eb1a3aaf37ca59d054e2145e1e7
|
[
"Apache-2.0"
] | 5
|
2021-01-11T10:04:32.000Z
|
2021-06-07T11:19:51.000Z
|
unclassifiers/config.py
|
infralight/terraform-unclassifier
|
edb81b86f1c53eb1a3aaf37ca59d054e2145e1e7
|
[
"Apache-2.0"
] | 1
|
2021-01-07T15:40:08.000Z
|
2021-01-07T15:40:08.000Z
|
unclassifiers/config.py
|
infralight/terraform-unclassifier
|
edb81b86f1c53eb1a3aaf37ca59d054e2145e1e7
|
[
"Apache-2.0"
] | 1
|
2021-01-07T14:38:41.000Z
|
2021-01-07T14:38:41.000Z
|
class Config:
def __init__(self, classified_types: list[str]):
self.classified_types = classified_types
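A minimal usage sketch; the Terraform resource-type string passed in is a made-up example value:

config = Config(classified_types=["aws_s3_bucket"])
print(config.classified_types)  # ['aws_s3_bucket']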
| 22.6
| 48
| 0.716814
| 13
| 113
| 5.692308
| 0.615385
| 0.608108
| 0.513514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19469
| 113
| 4
| 49
| 28.25
| 0.813187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
0c907939bf6b0f83e1b6efdbc468290650d64ec6
| 143
|
py
|
Python
|
Exercicios/Ex_013.py
|
jotmar/PythonEx
|
bf026518ae5479d5c99ff7a4e95fc383dec22d36
|
[
"MIT"
] | null | null | null |
Exercicios/Ex_013.py
|
jotmar/PythonEx
|
bf026518ae5479d5c99ff7a4e95fc383dec22d36
|
[
"MIT"
] | null | null | null |
Exercicios/Ex_013.py
|
jotmar/PythonEx
|
bf026518ae5479d5c99ff7a4e95fc383dec22d36
|
[
"MIT"
] | null | null | null |
sal = float(input('Qual é o salário do funcionário? R$'))
print(f'Após o aumento de 15%, ele passou a receber R${sal + (sal * 15 / 100):.2f}')
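For example, an input salary of 1000 yields R$1150.00: the raised salary is sal + sal * 15 / 100 = 1000 + 150 = 1150.0, and the :.2f format specifier renders it with two decimal places.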
| 47.666667
| 84
| 0.65035
| 27
| 143
| 3.444444
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 0.174825
| 143
| 2
| 85
| 71.5
| 0.720339
| 0
| 0
| 0
| 0
| 0.5
| 0.762238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.5
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 6
|
0cc42358712e980c748b580d44a00c9466fd741c
| 98
|
py
|
Python
|
src/reinforcement/__init__.py
|
helenapoleri/reinforcement-filesystem
|
3fb839c07563384adba4abdd0fec61ebf76a2530
|
[
"BSD-2-Clause"
] | null | null | null |
src/reinforcement/__init__.py
|
helenapoleri/reinforcement-filesystem
|
3fb839c07563384adba4abdd0fec61ebf76a2530
|
[
"BSD-2-Clause"
] | null | null | null |
src/reinforcement/__init__.py
|
helenapoleri/reinforcement-filesystem
|
3fb839c07563384adba4abdd0fec61ebf76a2530
|
[
"BSD-2-Clause"
] | null | null | null |
from .agent import *
from .reinforcement import *
from .environment import *
from .config import *
| 24.5
| 28
| 0.765306
| 12
| 98
| 6.25
| 0.5
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153061
| 98
| 4
| 29
| 24.5
| 0.903614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4900f7d7885cbe751f57d2b1cf8aabc4fd41374c
| 145
|
py
|
Python
|
pbj/electrostatics/pb_formulation/__init__.py
|
kstylesc/PBJ
|
0a4440b684c1d028341762a275fb3d51956b8301
|
[
"MIT"
] | null | null | null |
pbj/electrostatics/pb_formulation/__init__.py
|
kstylesc/PBJ
|
0a4440b684c1d028341762a275fb3d51956b8301
|
[
"MIT"
] | null | null | null |
pbj/electrostatics/pb_formulation/__init__.py
|
kstylesc/PBJ
|
0a4440b684c1d028341762a275fb3d51956b8301
|
[
"MIT"
] | null | null | null |
import pbj.electrostatics.pb_formulation.formulations as formulations
import pbj.electrostatics.pb_formulation.preconditioning as preconditioning
| 72.5
| 75
| 0.910345
| 16
| 145
| 8.125
| 0.5
| 0.138462
| 0.353846
| 0.384615
| 0.553846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048276
| 145
| 2
| 75
| 72.5
| 0.942029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0b5880aa74802cffbdb7fb9d7350bd21f6e55f4c
| 10,886
|
py
|
Python
|
okr/migrations/0031_auto_20201207_1204.py
|
wdr-data/wdr-okr
|
71c9e6e8d3521b1bb67d30310a93584389de2127
|
[
"MIT"
] | 2
|
2021-07-28T08:46:13.000Z
|
2022-01-19T17:05:48.000Z
|
okr/migrations/0031_auto_20201207_1204.py
|
wdr-data/wdr-okr
|
71c9e6e8d3521b1bb67d30310a93584389de2127
|
[
"MIT"
] | 3
|
2020-11-10T23:34:17.000Z
|
2021-03-31T16:19:21.000Z
|
okr/migrations/0031_auto_20201207_1204.py
|
wdr-data/wdr-okr
|
71c9e6e8d3521b1bb67d30310a93584389de2127
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-12-07 11:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("okr", "0030_auto_20201126_1641"),
]
operations = [
migrations.AlterField(
model_name="insta",
name="name",
field=models.CharField(
help_text="Name des Accounts", max_length=200, verbose_name="Name"
),
),
migrations.AlterField(
model_name="insta",
name="quintly_profile_id",
field=models.IntegerField(verbose_name="Quintly Profil-ID"),
),
migrations.AlterField(
model_name="instacollaboration",
name="collaboration_type",
field=models.ForeignKey(
help_text="Bezeichnung der Art von Collaboration",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="collaboration",
to="okr.instacollaborationtype",
verbose_name="Format",
),
),
migrations.AlterField(
model_name="instacollaborationtype",
name="name",
field=models.CharField(
help_text="Bezeichnung der Art von Collaboration",
max_length=200,
verbose_name="Name",
),
),
migrations.AlterField(
model_name="instainsight",
name="insta",
field=models.ForeignKey(
help_text="Globale ID des Instagram-Accounts",
on_delete=django.db.models.deletion.CASCADE,
related_name="insights",
related_query_name="insight",
to="okr.insta",
verbose_name="Instagram-Account",
),
),
migrations.AlterField(
model_name="instainsight",
name="interval",
field=models.CharField(
choices=[
("daily", "Täglich"),
("weekly", "Wöchentlich"),
("monthly", "Monatlich"),
],
help_text="Intervall (täglich, wöchentlich oder monatlich)",
max_length=10,
verbose_name="Zeitraum",
),
),
migrations.AlterField(
model_name="instapost",
name="comments",
field=models.IntegerField(
help_text="Anzahl der Kommentare", verbose_name="Kommentare"
),
),
migrations.AlterField(
model_name="instapost",
name="created_at",
field=models.DateTimeField(verbose_name="Erstellungsdatum"),
),
migrations.AlterField(
model_name="instapost",
name="external_id",
field=models.CharField(
max_length=25, unique=True, verbose_name="Externe ID"
),
),
migrations.AlterField(
model_name="instapost",
name="insta",
field=models.ForeignKey(
help_text="Globale ID des Instagram-Accounts",
on_delete=django.db.models.deletion.CASCADE,
related_name="posts",
related_query_name="post",
to="okr.insta",
verbose_name="Instagram-Account",
),
),
migrations.AlterField(
model_name="instapost",
name="likes",
field=models.IntegerField(
help_text="Anzahl der Likes", verbose_name="Likes"
),
),
migrations.AlterField(
model_name="instapost",
name="link",
field=models.URLField(help_text="URL des Postings", verbose_name="Link"),
),
migrations.AlterField(
model_name="instapost",
name="message",
field=models.TextField(
help_text="Volltext des Postings", verbose_name="Text"
),
),
migrations.AlterField(
model_name="instapost",
name="post_type",
field=models.CharField(
help_text="Art des Postings (Image, Carousel, etc)",
max_length=20,
verbose_name="Typ",
),
),
migrations.AlterField(
model_name="instastory",
name="caption",
field=models.TextField(
help_text="Volltext des Story-Elements", null=True, verbose_name="Text"
),
),
migrations.AlterField(
model_name="instastory",
name="created_at",
field=models.DateTimeField(verbose_name="Erstellungszeitpunkt"),
),
migrations.AlterField(
model_name="instastory",
name="exits",
field=models.IntegerField(
help_text="Anzahl der Ausstiege", verbose_name="Exits"
),
),
migrations.AlterField(
model_name="instastory",
name="external_id",
field=models.CharField(
max_length=25, unique=True, verbose_name="Externe ID"
),
),
migrations.AlterField(
model_name="instastory",
name="insta",
field=models.ForeignKey(
help_text="Globale ID des Instagram-Accounts",
on_delete=django.db.models.deletion.CASCADE,
related_name="stories",
related_query_name="story",
to="okr.insta",
verbose_name="Instagram-Account",
),
),
migrations.AlterField(
model_name="instastory",
name="link",
field=models.URLField(
help_text="URL des Story-Elements", max_length=1024, verbose_name="Link"
),
),
migrations.AlterField(
model_name="instastory",
name="replies",
field=models.IntegerField(
help_text="Anzahl der Antworten", verbose_name="Antworten"
),
),
migrations.AlterField(
model_name="instastory",
name="story_type",
field=models.CharField(
help_text="Art des Story-Elements (Image/Video)",
max_length=200,
verbose_name="Typ",
),
),
migrations.AlterField(
model_name="podcast",
name="name",
field=models.CharField(
help_text="Name des Accounts", max_length=200, verbose_name="Name"
),
),
migrations.AlterField(
model_name="property",
name="name",
field=models.CharField(
help_text="Name des Accounts", max_length=200, verbose_name="Name"
),
),
migrations.AlterField(
model_name="youtube",
name="name",
field=models.CharField(
help_text="Name des Accounts", max_length=200, verbose_name="Name"
),
),
migrations.AlterField(
model_name="youtube",
name="quintly_profile_id",
field=models.IntegerField(verbose_name="Quintly Profil-ID"),
),
migrations.AlterField(
model_name="youtubeagerangeaverageviewduration",
name="interval",
field=models.CharField(
choices=[
("daily", "Täglich"),
("weekly", "Wöchentlich"),
("monthly", "Monatlich"),
],
help_text="Intervall (täglich, wöchentlich, monatlich)",
max_length=10,
verbose_name="Zeitraum",
),
),
migrations.AlterField(
model_name="youtubeagerangeaverageviewpercentage",
name="interval",
field=models.CharField(
choices=[
("daily", "Täglich"),
("weekly", "Wöchentlich"),
("monthly", "Monatlich"),
],
help_text="Intervall (täglich, wöchentlich, monatlich)",
max_length=10,
verbose_name="Zeitraum",
),
),
migrations.AlterField(
model_name="youtubeagerangeviewspercentage",
name="interval",
field=models.CharField(
choices=[
("daily", "Täglich"),
("weekly", "Wöchentlich"),
("monthly", "Monatlich"),
],
help_text="Intervall (täglich, wöchentlich, monatlich)",
max_length=10,
verbose_name="Zeitraum",
),
),
migrations.AlterField(
model_name="youtubeagerangewatchtimepercentage",
name="interval",
field=models.CharField(
choices=[
("daily", "Täglich"),
("weekly", "Wöchentlich"),
("monthly", "Monatlich"),
],
help_text="Intervall (täglich, wöchentlich, monatlich)",
max_length=10,
verbose_name="Zeitraum",
),
),
migrations.AlterField(
model_name="youtubeanalytics",
name="interval",
field=models.CharField(
choices=[
("daily", "Täglich"),
("weekly", "Wöchentlich"),
("monthly", "Monatlich"),
],
help_text="Intervall (täglich, wöchentlich, monatlich)",
max_length=10,
verbose_name="Zeitraum",
),
),
migrations.AlterField(
model_name="youtubeanalytics",
name="youtube",
field=models.ForeignKey(
help_text="Globale ID des YouTube-Accouts",
on_delete=django.db.models.deletion.CASCADE,
related_name="analytic",
related_query_name="analytics",
to="okr.youtube",
verbose_name="YouTube-Account",
),
),
migrations.AlterField(
model_name="youtubetrafficsource",
name="youtube",
field=models.ForeignKey(
help_text="Globale ID des YouTube-Accounts",
on_delete=django.db.models.deletion.CASCADE,
related_name="traffic_source",
related_query_name="traffic_sources",
to="okr.youtube",
verbose_name="YouTube-Account",
),
),
]
| 34.66879
| 88
| 0.497703
| 848
| 10,886
| 6.214623
| 0.159198
| 0.125237
| 0.156546
| 0.181594
| 0.810626
| 0.798861
| 0.713283
| 0.583491
| 0.550854
| 0.526186
| 0
| 0.010769
| 0.39436
| 10,886
| 313
| 89
| 34.779553
| 0.788564
| 0.004134
| 0
| 0.807818
| 1
| 0
| 0.208875
| 0.018913
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006515
| 0
| 0.016287
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0b59aeb914ac6cabaf5352fbc88af4f8ce6a230e
| 3,583
|
py
|
Python
|
data_loader.py
|
RohitGandikota/Satellite-Images-to-thematic-maps-using-Generative-Adversarial-Networks.
|
39f67614bb6b5a9e52fb286901b0b5832468b486
|
[
"MIT"
] | null | null | null |
data_loader.py
|
RohitGandikota/Satellite-Images-to-thematic-maps-using-Generative-Adversarial-Networks.
|
39f67614bb6b5a9e52fb286901b0b5832468b486
|
[
"MIT"
] | null | null | null |
data_loader.py
|
RohitGandikota/Satellite-Images-to-thematic-maps-using-Generative-Adversarial-Networks.
|
39f67614bb6b5a9e52fb286901b0b5832468b486
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 13:27:16 2019
@author: Rohit Gandikota and Radha Krishna
"""
import os
import numpy as np
from osgeo import gdal
#datagen = ImageDataGenerator()
#TASK TO DO.
#THERE ARE TWO IMAGES TO LOAD HERE. 1 IS THE MAIN SAT IMAGE AND THE OTHER IS THE WATER IMAGE.
def load_data(batch_size=1, is_testing=False):
data_type = "train" if not is_testing else "test"
data_main='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\'
data_sat='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\Data\\'
data_water='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\Labels\\'
images_water = os.listdir(data_water)
images_sat = os.listdir(data_sat)
args = np.intersect1d(images_water, images_sat)
batch_images = np.random.choice(args, size=batch_size)
sat_data = []
water_data = []
for img_path in batch_images:
sat_img = gdal.Open(data_sat+img_path).ReadAsArray()
water_img=gdal.Open(data_water+img_path).ReadAsArray()
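        # NaN compares unequal to itself, so this zeroes out NaN pixels before binarizing the mask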
        water_img[water_img != water_img] = 0
        water_img[water_img > 0] = 1
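        # GDAL returns (bands, rows, cols); move the band axis last -> (rows, cols, bands)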
sat_img = np.einsum('ijk->jki', sat_img)
sat_img = (sat_img - sat_img.min()) / (sat_img.max() - sat_img.min())
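        # zero-pad each 220x220 tile up to the 256x256 input size used downstream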
        pad = np.zeros((256, 256, 3))
        pad_w = np.zeros((256, 256))
        pad[:220, :220, :] = sat_img
        pad_w[:220, :220] = water_img
# sat_img = (np.zeros(256,256,3)[:220,:220]=sat_img)
sat_data.append(pad)
water_data.append(pad_w)
water_data = np.array(water_data)
water_data = np.expand_dims(water_data, axis=-1)
sat_data = np.array(sat_data)
return water_data,sat_data
def load_batch(batch_size=1, is_testing=False):
data_type = "train" if not is_testing else "test"
data_main='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\'
data_sat='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\Data\\'
data_water='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\Labels\\'
images_water = os.listdir(data_water)
images_sat = os.listdir(data_sat)
args = np.intersect1d(images_water, images_sat)
#batch_images = np.random.choice(os.listdir(data_sat), size=batch_size)
n_batches = int(len(args) / batch_size)
for i in range(n_batches-1):
batch_images = args[i*batch_size:(i+1)*batch_size]
sat_data = []
water_data = []
for img_path in batch_images:
            # print(data_sat + img_path)
sat_img = gdal.Open(data_sat+img_path).ReadAsArray()
water_img=gdal.Open(data_water+img_path).ReadAsArray()
            water_img[water_img != water_img] = 0
            water_img[water_img > 0] = 1
sat_img = np.einsum('ijk->jki', sat_img)
sat_img = (sat_img - sat_img.min()) / (sat_img.max() - sat_img.min())
            pad = np.zeros((256, 256, 3))
            pad_w = np.zeros((256, 256))
            pad[:220, :220, :] = sat_img
            pad_w[:220, :220] = water_img
# sat_img = (np.zeros(256,256,3)[:220,:220]=sat_img)
sat_data.append(pad)
water_data.append(pad_w)
water_data = np.array(water_data)
water_data = np.expand_dims(water_data, axis=-1)
sat_data = np.array(sat_data)
yield water_data,sat_data
##print(load_data(batch_size=10))
#image_generator=load_batch(batch_size=500)
#water_data, sat_data=next(image_generator)
##
| 38.526882
| 94
| 0.623779
| 523
| 3,583
| 4.015296
| 0.202677
| 0.071429
| 0.034286
| 0.048571
| 0.759048
| 0.759048
| 0.759048
| 0.759048
| 0.759048
| 0.759048
| 0
| 0.039503
| 0.236952
| 3,583
| 93
| 95
| 38.526882
| 0.728603
| 0.161596
| 0
| 0.819672
| 0
| 0
| 0.147994
| 0.136238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0.04918
| 0
| 0.098361
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0bb5b3ab31f02a54ad08292ec334e5898681ede8
| 246
|
py
|
Python
|
jms_estimator/__init__.py
|
jameshtwose/learning_CICD
|
ef91c547683f76af57196f7bc548ee9562b79563
|
[
"BSD-3-Clause"
] | null | null | null |
jms_estimator/__init__.py
|
jameshtwose/learning_CICD
|
ef91c547683f76af57196f7bc548ee9562b79563
|
[
"BSD-3-Clause"
] | null | null | null |
jms_estimator/__init__.py
|
jameshtwose/learning_CICD
|
ef91c547683f76af57196f7bc548ee9562b79563
|
[
"BSD-3-Clause"
] | null | null | null |
from .jms_estimator import JmsEstimator
from .jms_estimator import JmsClassifier
from .jms_estimator import JmsTransformer
from .version import __version__
__all__ = ['JmsEstimator', 'JmsClassifier', 'JmsTransformer',
'__version__']
| 27.333333
| 61
| 0.784553
| 24
| 246
| 7.416667
| 0.375
| 0.117978
| 0.269663
| 0.370787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142276
| 246
| 8
| 62
| 30.75
| 0.843602
| 0
| 0
| 0
| 0
| 0
| 0.203252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0bc1f75fc4d25aa7a124dd98160c743cc6e66f22
| 23
|
py
|
Python
|
textar/__init__.py
|
sreecodeslayer/textar
|
9d61b5d8b78b5f736d5795ec09da55ad7ba730de
|
[
"MIT"
] | 1
|
2019-11-27T20:18:05.000Z
|
2019-11-27T20:18:05.000Z
|
textar/__init__.py
|
sreecodeslayer/textar
|
9d61b5d8b78b5f736d5795ec09da55ad7ba730de
|
[
"MIT"
] | null | null | null |
textar/__init__.py
|
sreecodeslayer/textar
|
9d61b5d8b78b5f736d5795ec09da55ad7ba730de
|
[
"MIT"
] | null | null | null |
from .api import Textar
| 23
| 23
| 0.826087
| 4
| 23
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e7e89feda6cbc92ffffa1533a134f3b2479d6cf0
| 233
|
py
|
Python
|
baekjoon/10171.py
|
GihwanKim/Baekjoon
|
52eb2bf80bb1243697858445e5b5e2d50d78be4e
|
[
"MIT"
] | null | null | null |
baekjoon/10171.py
|
GihwanKim/Baekjoon
|
52eb2bf80bb1243697858445e5b5e2d50d78be4e
|
[
"MIT"
] | null | null | null |
baekjoon/10171.py
|
GihwanKim/Baekjoon
|
52eb2bf80bb1243697858445e5b5e2d50d78be4e
|
[
"MIT"
] | null | null | null |
"""
10171 : Cat
URL : https://www.acmicpc.net/problem/10171
Input :
Output :
\ /\
) ( ')
( / )
\(__)|
"""
print("\\ /\\")
print(" ) ( ')")
print("( / )")
print(" \\(__)|")
| 14.5625
| 47
| 0.32618
| 15
| 233
| 4.8
| 0.733333
| 0.416667
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.399142
| 233
| 15
| 48
| 15.533333
| 0.442857
| 0.519313
| 0
| 0
| 0
| 0
| 0.432432
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
e7f6db0654aef633d36ddd147a5e0e1ebdd54991
| 15,411
|
py
|
Python
|
tests/test_filter_abund.py
|
Dmarch28/khmer
|
86ce40a6619fc6f6e9c4ce18ce1e89de93ba2f83
|
[
"CNRI-Python"
] | null | null | null |
tests/test_filter_abund.py
|
Dmarch28/khmer
|
86ce40a6619fc6f6e9c4ce18ce1e89de93ba2f83
|
[
"CNRI-Python"
] | 4
|
2021-03-19T08:45:22.000Z
|
2022-02-18T21:25:42.000Z
|
tests/test_filter_abund.py
|
Dmarch28/khmer
|
86ce40a6619fc6f6e9c4ce18ce1e89de93ba2f83
|
[
"CNRI-Python"
] | 1
|
2021-03-16T12:01:37.000Z
|
2021-03-16T12:01:37.000Z
|
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2016, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=missing-docstring
import os
import khmer
import screed
from . import khmer_tst_utils as utils
from .test_scripts import _make_counting
def test_filter_abund_1():
script = 'filter-abund.py'
infile = utils.copy_test_data('test-abund-read-2.fa')
n_infile = utils.copy_test_data('test-fastq-n-reads.fq')
in_dir = os.path.dirname(infile)
n_in_dir = os.path.dirname(n_infile)
counting_ht = _make_counting(infile, K=17)
n_counting_ht = _make_counting(n_infile, K=17)
args = [counting_ht, infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
n_outfile = n_infile + '.abundfilt'
n_outfile2 = n_infile + '2.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 1, seqs
assert 'GGTTGACGGGGCTCAGGG' in seqs
args = [n_counting_ht, n_infile]
utils.runscript(script, args, n_in_dir)
seqs = set([r.sequence for r in screed.open(n_infile)])
assert os.path.exists(n_outfile), n_outfile
args = [n_counting_ht, n_infile, '-o', n_outfile2]
utils.runscript(script, args, in_dir)
assert os.path.exists(n_outfile2), n_outfile2
def test_filter_abund_2():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
counting_ht = _make_counting(infile, K=17)
script = 'filter-abund.py'
args = ['-C', '1', counting_ht, infile, infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 2, seqs
assert 'GGTTGACGGGGCTCAGGG' in seqs
def test_filter_abund_2_stdin():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
counting_ht = _make_counting(infile, K=17)
script = 'filter-abund.py'
args = ['-C', '1', counting_ht, '-']
(status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
assert status == 1
assert "Accepting input from stdin; output filename must be provided" \
in str(err)
def test_filter_abund_2_stdin_gzip_out():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
outfile = utils.get_temp_filename('out.fa.gz')
counting_ht = _make_counting(infile, K=17)
script = 'filter-abund.py'
args = ['-C', '1', counting_ht, infile, '-o', outfile, '--gzip']
(status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
print(out)
print(err)
assert status == 0
# make sure that FASTQ records are retained.
def test_filter_abund_3_fq_retained():
infile = utils.copy_test_data('test-abund-read-2.fq')
in_dir = os.path.dirname(infile)
counting_ht = _make_counting(infile, K=17)
script = 'filter-abund.py'
args = ['-C', '1', counting_ht, infile, infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 2, seqs
assert 'GGTTGACGGGGCTCAGGG' in seqs
# check for 'quality' string.
quals = set([r.quality for r in screed.open(outfile)])
assert len(quals) == 2, quals
assert '##################' in quals
# make sure that FASTQ names are properly parsed, both formats.
def test_filter_abund_4_fq_casava_18():
infile = utils.copy_test_data('test-abund-read-2.paired2.fq')
in_dir = os.path.dirname(infile)
counting_ht = _make_counting(infile, K=17)
script = 'filter-abund.py'
args = [counting_ht, infile, infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.name for r in screed.open(outfile)])
assert 'pair:foo 1::N' in seqs, seqs
def test_filter_abund_1_singlefile():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'filter-abund-single.py'
args = ['-x', '1e7', '-N', '2', '-k', '17', infile]
(status, out, err) = utils.runscript(script, args, in_dir)
assert 'Total number of unique k-mers: 98' in err, err
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 1, seqs
assert 'GGTTGACGGGGCTCAGGG' in seqs
def test_filter_abund_1_singlefile_long_k():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'filter-abund-single.py'
args = ['-x', '1e7', '-N', '2', '-k', '35', '-H', 'murmur', infile]
(status, out, err) = utils.runscript(script, args, in_dir)
assert 'Total number of unique k-mers: 80' in err, err
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 0
def test_filter_abund_1_singlefile_long_k_nosave():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'filter-abund-single.py'
args = ['-x', '1e7', '-N', '2', '-k', '35', '-H', 'murmur', infile,
'--savegraph', 'foo']
(status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
print(out)
print(err)
assert status == 1
assert 'ERROR: cannot save different hash functions yet.' in err
def test_filter_abund_2_singlefile():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
tabfile = utils.get_temp_filename('test-savegraph.ct')
script = 'filter-abund-single.py'
args = ['-x', '1e7', '-N', '2', '-k', '17', '--savegraph',
tabfile, infile]
(status, out, err) = utils.runscript(script, args, in_dir)
assert 'Total number of unique k-mers: 98' in err, err
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 1, seqs
assert 'GGTTGACGGGGCTCAGGG' in seqs
def test_filter_abund_2_singlefile_fq_casava_18():
infile = utils.copy_test_data('test-abund-read-2.paired2.fq')
in_dir = os.path.dirname(infile)
script = 'filter-abund-single.py'
args = ['-x', '1e7', '-N', '2', '-k', '17', infile]
(status, out, err) = utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.name for r in screed.open(outfile)])
assert 'pair:foo 1::N' in seqs, seqs
def test_filter_abund_4_retain_low_abund():
# test that the -V option does not trim sequences that are low abundance
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
counting_ht = _make_counting(infile, K=17)
script, args = ('filter-abund.py', ['-V', counting_ht, infile])
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 2, seqs
assert 'GGTTGACGGGGCTCAGGG' in seqs
def test_filter_abund_single_4_retain_low_abund():
# test that the -V option does not trim sequences that are low abundance
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
counting_ht = _make_counting(infile, K=17)
script, args = ('filter-abund-single.py', ['-k', '17', '-V', infile])
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 2, seqs
assert 'GGTTGACGGGGCTCAGGG' in seqs
def test_filter_abund_5_trim_high_abund():
# test that the -V option *does* trim sequences that are high abundance
infile = utils.copy_test_data('test-abund-read-3.fa')
in_dir = os.path.dirname(infile)
counting_ht = _make_counting(infile, K=17)
script, args = ('filter-abund.py', ['-V', counting_ht, infile])
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 2, seqs
# trimmed sequence @ error
assert 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGC' in seqs
def test_filter_abund_single_trim_high_abund():
# test that the -V option *does* trim sequences that are high abundance
infile = utils.copy_test_data('test-abund-read-3.fa')
in_dir = os.path.dirname(infile)
script, args = ('filter-abund-single.py', ['-k', '17', '-V', infile])
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 2, seqs
# trimmed sequence @ error
assert 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGC' in seqs
def test_filter_abund_6_trim_high_abund_Z():
# test that -V/-Z settings interact properly -
# trimming should not happen if -Z is set high enough.
infile = utils.copy_test_data('test-abund-read-3.fa')
in_dir = os.path.dirname(infile)
counting_ht = _make_counting(infile, K=17)
for script, args in (('filter-abund.py',
['-V', '-Z', '25', counting_ht, infile]),
('filter-abund-single.py',
['-k', '17', '-V', '-Z', '25', infile])):
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 2, seqs
# untrimmed seq.
badseq = 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCgtgCCGCAGCTG' \
'TCGTCAGGGGATTTCCGGGCGG'
assert badseq in seqs # should be there, untrimmed
def test_filter_abund_7_retain_Ns():
# check that filter-abund retains sequences with Ns, and treats them as As.
infile = utils.copy_test_data('test-filter-abund-Ns.fq')
in_dir = os.path.dirname(infile)
# copy test file over to test.fq & load into countgraph
counting_ht = _make_counting(infile, K=17)
script = 'filter-abund.py'
args = ['-C', '3', counting_ht, infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
# test for a sequence with an 'N' in it --
names = set([r.name for r in screed.open(outfile)])
assert '895:1:37:17593:9954 1::FOO_withN' in names, names
# check to see if that 'N' was properly changed to an 'A'
seqs = set([r.sequence for r in screed.open(outfile)])
assert 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAG' not in seqs, seqs
# ...and that an 'N' remains in the output sequences
found_N = False
for s in seqs:
if 'N' in s:
found_N = True
assert found_N, seqs
def test_filter_abund_single_8_retain_Ns():
# check that filter-abund-single retains
# sequences with Ns, and treats them as As.
infile = utils.copy_test_data('test-filter-abund-Ns.fq')
in_dir = os.path.dirname(infile)
script = 'filter-abund-single.py'
args = ['-k', '17', '-x', '1e7', '-N', '2', '-C', '3', infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
# test for a sequence with an 'N' in it --
names = set([r.name for r in screed.open(outfile)])
assert '895:1:37:17593:9954 1::FOO_withN' in names, names
# check to see if that 'N' was properly changed to an 'A'
seqs = set([r.sequence for r in screed.open(outfile)])
assert 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAG' not in seqs, seqs
# ...and that an 'N' remains in the output sequences
found_N = False
for s in seqs:
if 'N' in s:
found_N = True
assert found_N, seqs
def test_outfile():
infile = utils.get_test_data('paired-mixed-witherror.fa.pe')
outfile = utils.get_temp_filename('paired-mixed-witherror.fa.pe.abundfilt')
script = 'filter-abund-single.py'
args = ['-o', outfile, infile]
(status, out, err) = utils.runscript(script, args)
md5hash = utils._calc_md5(open(outfile, 'rb'))
assert md5hash == 'f17122f4c0c3dc0bcc4eeb375de93040', md5hash
def test_filter_abund_1_quiet():
script = 'filter-abund.py'
infile = utils.copy_test_data('test-abund-read-2.fa')
n_infile = utils.copy_test_data('test-fastq-n-reads.fq')
in_dir = os.path.dirname(infile)
n_in_dir = os.path.dirname(n_infile)
counting_ht = _make_counting(infile, K=17)
n_counting_ht = _make_counting(n_infile, K=17)
args = ['-q', counting_ht, infile]
status, out, err = utils.runscript(script, args, in_dir)
assert len(err) == 0
assert len(out) < 1000
outfile = infile + '.abundfilt'
n_outfile = n_infile + '.abundfilt'
n_outfile2 = n_infile + '2.abundfilt'
assert os.path.exists(outfile), outfile
def test_filter_abund_1_singlefile_quiet():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
script = 'filter-abund-single.py'
args = ['-q', '-x', '1e7', '-N', '2', '-k', '17', infile]
(status, out, err) = utils.runscript(script, args, in_dir)
assert len(err) == 0
assert len(out) < 1000
outfile = infile + '.abundfilt'
assert os.path.exists(outfile), outfile
| 32.929487
| 79
| 0.671468
| 2,231
| 15,411
| 4.501121
| 0.137606
| 0.050388
| 0.045808
| 0.054969
| 0.779924
| 0.751842
| 0.728042
| 0.720972
| 0.708624
| 0.698566
| 0
| 0.017856
| 0.200506
| 15,411
| 467
| 80
| 33
| 0.797176
| 0.181364
| 0
| 0.740876
| 0
| 0
| 0.164756
| 0.056458
| 0
| 0
| 0
| 0
| 0.229927
| 1
| 0.076642
| false
| 0
| 0.018248
| 0
| 0.094891
| 0.014599
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f001e8486ccf82d8397e6988168e509306e2626b
| 1,063
|
py
|
Python
|
tests/test_cli.py
|
developmentseed/python-seed
|
5a5ef1f89199595e5c4b5b1fc33c5ce0e7eb4e3d
|
[
"MIT"
] | 16
|
2018-03-04T18:34:52.000Z
|
2021-11-03T17:36:18.000Z
|
tests/test_cli.py
|
developmentseed/python-seed
|
5a5ef1f89199595e5c4b5b1fc33c5ce0e7eb4e3d
|
[
"MIT"
] | 9
|
2017-12-18T15:12:07.000Z
|
2020-10-01T17:05:36.000Z
|
tests/test_cli.py
|
developmentseed/python-seed
|
5a5ef1f89199595e5c4b5b1fc33c5ce0e7eb4e3d
|
[
"MIT"
] | 6
|
2019-03-07T19:49:54.000Z
|
2022-01-20T17:57:04.000Z
|
"""tests python_seed.cli."""
import os
from click.testing import CliRunner
from python_seed.scripts.cli import pyseed
def test_create():
"""Test the create function"""
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(pyseed, ["create", "myfunction"])
assert not os.path.exists("myfunction/.github/workflows/ci.yml")
assert not os.path.exists("myfunction/codecov.yml")
with open("myfunction/README.md", "r") as f:
assert f.read().splitlines()[0] == "# myfunction"
assert not result.exception
assert result.exit_code == 0
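    # second run: "--ci github" should scaffold the GitHub workflow and codecov config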
with runner.isolated_filesystem():
result = runner.invoke(pyseed, ["create", "myfunction", "--ci", "github"])
assert os.path.exists("myfunction/.github/workflows/ci.yml")
assert os.path.exists("myfunction/codecov.yml")
with open("myfunction/README.md", "r") as f:
assert f.read().splitlines()[0] == "# myfunction"
assert not result.exception
assert result.exit_code == 0
| 35.433333
| 82
| 0.644403
| 129
| 1,063
| 5.255814
| 0.348837
| 0.053097
| 0.070796
| 0.129794
| 0.778761
| 0.769912
| 0.752212
| 0.752212
| 0.752212
| 0.610619
| 0
| 0.004762
| 0.209784
| 1,063
| 29
| 83
| 36.655172
| 0.802381
| 0.044214
| 0
| 0.47619
| 0
| 0
| 0.220896
| 0.113433
| 0
| 0
| 0
| 0
| 0.47619
| 1
| 0.047619
| false
| 0
| 0.142857
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f02109817c82deefd1b9b024cb3ce8d43803ffc3
| 22,033
|
py
|
Python
|
ironic_staging_drivers/tests/unit/intel_nm/test_vendor.py
|
NaohiroTamura/ironic-staging-drivers
|
cf29fd0515760eb2ecb3855359d5acc395168a9e
|
[
"Apache-2.0"
] | null | null | null |
ironic_staging_drivers/tests/unit/intel_nm/test_vendor.py
|
NaohiroTamura/ironic-staging-drivers
|
cf29fd0515760eb2ecb3855359d5acc395168a9e
|
[
"Apache-2.0"
] | null | null | null |
ironic_staging_drivers/tests/unit/intel_nm/test_vendor.py
|
NaohiroTamura/ironic-staging-drivers
|
cf29fd0515760eb2ecb3855359d5acc395168a9e
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Intel NM vendor interface
"""
import os
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules import ipmitool
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
from ironic_lib import utils as ironic_utils
import mock
from oslo_config import cfg
from ironic_staging_drivers.intel_nm import nm_commands
from ironic_staging_drivers.intel_nm import nm_vendor
CONF = cfg.CONF
_MAIN_IDS = {'domain_id': 'platform', 'policy_id': 111}
_POLICY = {'domain_id': 'platform', 'enable': True, 'policy_id': 111,
'policy_trigger': 'none', 'action': 'alert',
'power_domain': 'primary', 'target_limit': 100,
'correction_time': 200, 'reporting_period': 600}
_SUSPEND = {'domain_id': 'platform', 'policy_id': 121,
'periods': [{'start': 10, 'stop': 30, 'days': ['monday']}]}
_GET_CAP = {'domain_id': 'platform', 'policy_trigger': 'none',
'power_domain': 'primary'}
_CONTROL = {'scope': 'global', 'enable': True}
_STATISTICS = {'scope': 'global', 'domain_id': 'platform',
'parameter_name': 'response_time'}
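# map each vendor passthru method to a payload that its validate() call should accept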
_VENDOR_METHODS_DATA = {'get_nm_policy': _MAIN_IDS,
'remove_nm_policy': _MAIN_IDS,
'get_nm_policy_suspend': _MAIN_IDS,
'remove_nm_policy_suspend': _MAIN_IDS,
'set_nm_policy': _POLICY,
'set_nm_policy_suspend': _SUSPEND,
'get_nm_capabilities': _GET_CAP,
'control_nm_policy': _CONTROL,
'get_nm_statistics': _STATISTICS,
'reset_nm_statistics': _STATISTICS}
class IntelNMPassthruTestCase(db_base.DbTestCase):
def setUp(self):
super(IntelNMPassthruTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_nm')
self.node = obj_utils.create_test_node(self.context, driver='fake_nm')
self.temp_filename = os.path.join(CONF.tempdir, self.node.uuid +
'.sdr')
@mock.patch.object(ironic_utils, 'unlink_without_raise', spec_set=True,
autospec=True)
@mock.patch.object(ipmitool, 'send_raw', spec_set=True, autospec=True)
@mock.patch.object(ipmitool, 'dump_sdr', spec_set=True, autospec=True)
@mock.patch.object(nm_commands, 'parse_slave_and_channel', spec_set=True,
autospec=True)
def test__get_nm_address_detected(self, parse_mock, dump_mock, raw_mock,
unlink_mock):
parse_mock.return_value = ('0x0A', '0x0B')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ret = nm_vendor._get_nm_address(task)
self.assertEqual(('0x0B', '0x0A'), ret)
self.node.refresh()
internal_info = self.node.driver_internal_info
self.assertEqual('0x0A', internal_info['intel_nm_address'])
self.assertEqual('0x0B', internal_info['intel_nm_channel'])
parse_mock.assert_called_once_with(self.temp_filename)
dump_mock.assert_called_once_with(task, self.temp_filename)
unlink_mock.assert_called_once_with(self.temp_filename)
raw_mock.assert_called_once_with(task, mock.ANY)
@mock.patch.object(ironic_utils, 'unlink_without_raise', spec_set=True,
autospec=True)
@mock.patch.object(ipmitool, 'send_raw', spec_set=True, autospec=True)
@mock.patch.object(ipmitool, 'dump_sdr', spec_set=True, autospec=True)
@mock.patch.object(nm_commands, 'parse_slave_and_channel', spec_set=True,
autospec=True)
def test__get_nm_address_already_detected(self, parse_mock, dump_mock,
raw_mock, unlink_mock):
internal_info = self.node.driver_internal_info
internal_info['intel_nm_channel'] = '0x0B'
internal_info['intel_nm_address'] = '0x0A'
self.node.driver_internal_info = internal_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ret = nm_vendor._get_nm_address(task)
self.assertEqual(('0x0B', '0x0A'), ret)
self.assertFalse(parse_mock.called)
self.assertFalse(dump_mock.called)
self.assertFalse(raw_mock.called)
self.assertFalse(unlink_mock.called)
@mock.patch.object(ironic_utils, 'unlink_without_raise', spec_set=True,
autospec=True)
@mock.patch.object(ipmitool, 'send_raw', spec_set=True, autospec=True)
@mock.patch.object(ipmitool, 'dump_sdr', spec_set=True, autospec=True)
@mock.patch.object(nm_commands, 'parse_slave_and_channel', spec_set=True,
autospec=True)
def test__get_nm_address_not_detected(self, parse_mock, dump_mock,
raw_mock, unlink_mock):
parse_mock.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IPMIFailure, nm_vendor._get_nm_address,
task)
self.node.refresh()
internal_info = self.node.driver_internal_info
self.assertEqual(False, internal_info['intel_nm_address'])
self.assertEqual(False, internal_info['intel_nm_channel'])
parse_mock.assert_called_once_with(self.temp_filename)
dump_mock.assert_called_once_with(task, self.temp_filename)
unlink_mock.assert_called_once_with(self.temp_filename)
self.assertFalse(raw_mock.called)
@mock.patch.object(ironic_utils, 'unlink_without_raise', spec_set=True,
autospec=True)
@mock.patch.object(ipmitool, 'send_raw', spec_set=True, autospec=True)
@mock.patch.object(ipmitool, 'dump_sdr', spec_set=True, autospec=True)
@mock.patch.object(nm_commands, 'parse_slave_and_channel', spec_set=True,
autospec=True)
def test__get_nm_address_raw_fail(self, parse_mock, dump_mock, raw_mock,
unlink_mock):
parse_mock.return_value = ('0x0A', '0x0B')
raw_mock.side_effect = exception.IPMIFailure('raw error')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IPMIFailure, nm_vendor._get_nm_address,
task)
self.node.refresh()
internal_info = self.node.driver_internal_info
self.assertEqual(False, internal_info['intel_nm_address'])
self.assertEqual(False, internal_info['intel_nm_channel'])
parse_mock.assert_called_once_with(self.temp_filename)
dump_mock.assert_called_once_with(task, self.temp_filename)
unlink_mock.assert_called_once_with(self.temp_filename)
raw_mock.assert_called_once_with(task, mock.ANY)
@mock.patch.object(ironic_utils, 'unlink_without_raise', spec_set=True,
autospec=True)
@mock.patch.object(ipmitool, 'send_raw', spec_set=True, autospec=True)
@mock.patch.object(ipmitool, 'dump_sdr', spec_set=True, autospec=True)
@mock.patch.object(nm_commands, 'parse_slave_and_channel', spec_set=True,
autospec=True)
def test__get_nm_address_already_not_detected(self, parse_mock, dump_mock,
raw_mock, unlink_mock):
internal_info = self.node.driver_internal_info
internal_info['intel_nm_channel'] = False
internal_info['intel_nm_address'] = False
self.node.driver_internal_info = internal_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IPMIFailure, nm_vendor._get_nm_address,
task)
self.assertFalse(parse_mock.called)
self.assertFalse(dump_mock.called)
self.assertFalse(raw_mock.called)
self.assertFalse(unlink_mock.called)
@mock.patch.object(ipmitool, 'send_raw', spec_set=True, autospec=True)
@mock.patch.object(nm_vendor, '_get_nm_address', spec_set=True,
autospec=True)
def test__execute_nm_command(self, addr_mock, raw_mock):
addr_mock.return_value = ('0x0A', '0x0B')
raw_mock.return_value = ('0x03 0x04', '')
fake_data = {'foo': 'bar'}
fake_command = mock.MagicMock()
fake_parse = mock.MagicMock()
fake_command.return_value = ('0x01', '0x02')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
nm_vendor._execute_nm_command(task, fake_data, fake_command,
fake_parse)
self.assertEqual('single', task.node.driver_info['ipmi_bridging'])
self.assertEqual('0x0A',
task.node.driver_info['ipmi_target_channel'])
self.assertEqual('0x0B',
task.node.driver_info['ipmi_target_address'])
fake_command.assert_called_once_with(fake_data)
raw_mock.assert_called_once_with(task, '0x01 0x02')
fake_parse.assert_called_once_with(['0x03', '0x04'])
@mock.patch.object(ipmitool, 'send_raw', spec_set=True, autospec=True)
@mock.patch.object(nm_vendor, '_get_nm_address', spec_set=True,
autospec=True)
def test__execute_nm_command_no_parse(self, addr_mock, raw_mock):
addr_mock.return_value = ('0x0A', '0x0B')
fake_data = {'foo': 'bar'}
fake_command = mock.MagicMock()
fake_command.return_value = ('0x01', '0x02')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
nm_vendor._execute_nm_command(task, fake_data, fake_command)
self.assertEqual('single', task.node.driver_info['ipmi_bridging'])
self.assertEqual('0x0A',
task.node.driver_info['ipmi_target_channel'])
self.assertEqual('0x0B',
task.node.driver_info['ipmi_target_address'])
fake_command.assert_called_once_with(fake_data)
raw_mock.assert_called_once_with(task, '0x01 0x02')
def test_validate_json(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
for method, data in _VENDOR_METHODS_DATA.items():
task.driver.vendor.validate(task, method, 'fake', **data)
def test_validate_json_error(self):
fake_data = {'foo': 'bar'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
for method in _VENDOR_METHODS_DATA:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate, task, method,
'fake', **fake_data)
def test_validate_control_no_domain(self):
data = {'scope': 'domain', 'enable': True}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.vendor.validate, task,
'control_nm_policy', 'fake', **data)
def test_validate_control_no_policy(self):
data = {'scope': 'policy', 'enable': True, 'domain_id': 'platform'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.vendor.validate, task,
'control_nm_policy', 'fake', **data)
def test_validate_policy_boot(self):
data = _POLICY.copy()
del data['correction_time']
data['policy_trigger'] = 'boot'
data['target_limit'] = {'boot_mode': 'power', 'cores_disabled': 2}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.validate(task, 'set_nm_policy', 'fake', **data)
def test_validate_policy_boot_error(self):
data = _POLICY.copy()
data['policy_trigger'] = 'boot'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate, task,
'set_nm_policy', 'fake', **data)
def test_validate_policy_no_correction_time(self):
data = _POLICY.copy()
del data['correction_time']
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.vendor.validate, task,
'set_nm_policy', 'fake', **data)
def test_validate_statistics_no_policy(self):
data = {'scope': 'policy', 'domain_id': 'platform'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.vendor.validate, task,
'reset_nm_statistics', 'fake', **data)
def test_validate_statistics_no_domain(self):
data = {'scope': 'global', 'parameter_name': 'power'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate, task,
'get_nm_statistics', 'fake', **data)
def test_reset_statistics_invalid_parameter(self):
data = {'scope': 'global', 'domain_id': 'platform',
'parameter_name': 'power'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate, task,
'reset_nm_statistics', 'fake', **data)
def test_get_statistics_no_parameter(self):
data = {'scope': 'global', 'domain_id': 'platform'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.vendor.validate, task,
'get_nm_statistics', 'fake', **data)
def test_get_statistics_invalid_parameter(self):
data = {'scope': 'policy', 'domain_id': 'platform', 'policy_id': 111,
'parameter_name': 'response_time'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate, task,
'get_nm_statistics', 'fake', **data)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_control_nm_policy(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.control_nm_policy(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.control_policies)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_set_nm_policy(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.set_nm_policy(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.set_policy)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_get_nm_policy(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.get_nm_policy(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.get_policy,
nm_commands.parse_policy)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_remove_nm_policy(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.remove_nm_policy(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.remove_policy)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_set_nm_policy_suspend(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.set_nm_policy_suspend(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.set_policy_suspend)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_get_nm_policy_suspend(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.get_nm_policy_suspend(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.get_policy_suspend,
nm_commands.parse_policy_suspend)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_remove_nm_policy_suspend(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.remove_nm_policy_suspend(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.remove_policy_suspend
)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_get_nm_capabilities(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.get_nm_capabilities(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.get_capabilities,
nm_commands.parse_capabilities)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_get_nm_version(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.get_nm_version(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.get_version,
nm_commands.parse_version)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_get_nm_statistics(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.get_nm_statistics(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.get_statistics,
nm_commands.parse_statistics)
@mock.patch.object(nm_vendor, '_execute_nm_command', spec_set=True,
autospec=True)
def test_reset_nm_statistics(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.reset_nm_statistics(task)
mock_exec.assert_called_once_with(task, {},
nm_commands.reset_statistics)
| 51.12065
| 79
| 0.596741
| 2,466
| 22,033
| 5.018654
| 0.091646
| 0.031028
| 0.042421
| 0.053733
| 0.826842
| 0.810359
| 0.799612
| 0.777796
| 0.757595
| 0.745071
| 0
| 0.006936
| 0.306404
| 22,033
| 430
| 80
| 51.239535
| 0.802905
| 0.025371
| 0
| 0.654054
| 0
| 0
| 0.096523
| 0.008436
| 0
| 0
| 0.005966
| 0
| 0.17027
| 1
| 0.083784
| false
| 0.005405
| 0.032432
| 0
| 0.118919
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f0522e65d11970fa2cb606c1448581ffd3d8825e
| 111
|
py
|
Python
|
LEVEL1/가운데글자가져오기/solution.py
|
seunghwanly/CODING-TEST
|
a820da950c163d399594770199aa2e782d1fbbde
|
[
"MIT"
] | null | null | null |
LEVEL1/가운데글자가져오기/solution.py
|
seunghwanly/CODING-TEST
|
a820da950c163d399594770199aa2e782d1fbbde
|
[
"MIT"
] | null | null | null |
LEVEL1/가운데글자가져오기/solution.py
|
seunghwanly/CODING-TEST
|
a820da950c163d399594770199aa2e782d1fbbde
|
[
"MIT"
] | null | null | null |
def solution(s):
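    # odd length: the single middle character; even length: the middle two characters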
if len(s) % 2 != 0: return s[len(s) // 2]
else: return s[len(s) // 2:len(s) // 2 + 2]
| 27.75
| 47
| 0.486486
| 23
| 111
| 2.347826
| 0.391304
| 0.296296
| 0.37037
| 0.407407
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 0.261261
| 111
| 3
| 48
| 37
| 0.585366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f0748bde1df9dfd51e53e54624473476ad1fcb7c
| 198
|
py
|
Python
|
Codewars/8kyu/object-oriented-piracy/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/8kyu/object-oriented-piracy/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/8kyu/object-oriented-piracy/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 2.7.6
class Ship:
def __init__(self, draft, crew):
self.draft = draft
self.crew = crew
def is_worth_it(self):
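        # a ship is worth plundering when draft minus 1.5 per crew member exceeds 20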
return (self.draft - self.crew * 1.5) > 20
| 19.8
| 50
| 0.570707
| 30
| 198
| 3.566667
| 0.6
| 0.252336
| 0.242991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050725
| 0.30303
| 198
| 9
| 51
| 22
| 0.724638
| 0.070707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
b2ea1344648a8fd31c351aeb5a944417fa666670
| 45
|
py
|
Python
|
app/tgbot/handlers/admin/user/__init__.py
|
AnViSe/cost_confirmation_bot
|
f8eaa39c3df742bef0fc79b8b7ce0231f1b18749
|
[
"MIT"
] | 13
|
2021-12-27T19:46:19.000Z
|
2022-03-19T07:55:25.000Z
|
app/tgbot/handlers/admin/user/__init__.py
|
AnViSe/cost_confirmation_bot
|
f8eaa39c3df742bef0fc79b8b7ce0231f1b18749
|
[
"MIT"
] | null | null | null |
app/tgbot/handlers/admin/user/__init__.py
|
AnViSe/cost_confirmation_bot
|
f8eaa39c3df742bef0fc79b8b7ce0231f1b18749
|
[
"MIT"
] | 1
|
2022-02-07T10:48:18.000Z
|
2022-02-07T10:48:18.000Z
|
from .setup import register_user_db_handlers
| 22.5
| 44
| 0.888889
| 7
| 45
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6521c799635d0ce23e77f8047c619295f8220f62
| 219
|
py
|
Python
|
auto_ria_client/search.py
|
DSAdv/auto-ria-python
|
8cfbb0b475dce8e7871236c4f9c1cbf4a937383a
|
[
"Apache-2.0"
] | 2
|
2021-06-15T09:17:26.000Z
|
2022-01-05T20:15:11.000Z
|
auto_ria_client/search.py
|
DSAdv/auto-ria-python
|
8cfbb0b475dce8e7871236c4f9c1cbf4a937383a
|
[
"Apache-2.0"
] | null | null | null |
auto_ria_client/search.py
|
DSAdv/auto-ria-python
|
8cfbb0b475dce8e7871236c4f9c1cbf4a937383a
|
[
"Apache-2.0"
] | null | null | null |
class Search:
def __init__(self):
pass
def execute(self):
pass
def __repr__(self):
pass
def __str__(self):
pass
class QueryBuilder:
pass
class Query:
pass
| 9.954545
| 23
| 0.547945
| 24
| 219
| 4.5
| 0.458333
| 0.296296
| 0.305556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.383562
| 219
| 21
| 24
| 10.428571
| 0.8
| 0
| 0
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0.461538
| 0
| 0
| 0.538462
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
e8f5d13370c88501c5930a64f508cc704785b5dc
| 6,981
|
py
|
Python
|
hpcframework_unittest.py
|
Azure/hpcpack-mesos
|
871d48b0ab187c227edfe12b3dfaca7e6ad0dd59
|
[
"MIT"
] | 4
|
2019-04-26T03:03:13.000Z
|
2020-06-01T14:26:09.000Z
|
hpcframework_unittest.py
|
Azure/hpcpack-mesos
|
871d48b0ab187c227edfe12b3dfaca7e6ad0dd59
|
[
"MIT"
] | 2
|
2018-09-05T01:47:59.000Z
|
2018-09-05T01:49:20.000Z
|
hpcframework_unittest.py
|
Azure/hpcpack-mesos
|
871d48b0ab187c227edfe12b3dfaca7e6ad0dd59
|
[
"MIT"
] | 4
|
2018-09-05T01:35:40.000Z
|
2021-01-10T10:47:12.000Z
|
import json
import unittest
from mesoshttp.offers import Offer
from mock import patch, MagicMock, call
import hpcframework
def create_mock_mesos_offer_aux(cpus, max_cores, is_windows, hostname):
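    # build a minimal Mesos offer JSON blob; doubled braces stay literal under str.format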
json_offer = '''
{{
"hostname": "{}",
"attributes": [
'''.format(hostname)
    if is_windows:
json_offer += '''
{
"text": {
"value": "windows_server"
},
"type": "TEXT",
"name": "os"
},
'''
json_offer += '''
{{
"scalar": {{
"value": {}
}},
"type": "SCALAR",
"name": "cores"
}}
],
"resources": [
{{
"type": "SCALAR",
"allocation_info": {{
"role": "*"
}},
"role": "*",
"name": "cpus",
"scalar": {{
"value": {}
}}
}}
]
}}
'''.format(max_cores, cpus)
return json.loads(json_offer)
def create_mock_mesos_offer(cpus, max_cores, is_windows, hostname):
return Offer("uri", "fid", "sid", create_mock_mesos_offer_aux(cpus, max_cores, is_windows, hostname))
class HpcFrameworkUnitTest(unittest.TestCase):
def setUp(self):
self.hpcpackFramework = hpcframework.HpcpackFramwork("", "", "", "", "", "")
@patch('hpcframework.HpcpackFramwork.decline_offer')
@patch('hpcframework.HpcpackFramwork.accept_offer')
@patch('restclient.HpcRestClient.get_grow_decision')
def test_accpet_offer(self, mock_get_grow_decision, mock_accept_offer, mock_decline_offer):
mock_get_grow_decision.return_value = MagicMock(cores_to_grow=1)
offer = create_mock_mesos_offer(4.0, 4.0, True, "host1")
offers = [offer]
self.hpcpackFramework.offer_received(offers)
mock_accept_offer.assert_called_with(offer)
mock_decline_offer.assert_not_called()
@patch('hpcframework.HpcpackFramwork.decline_offer')
@patch('hpcframework.HpcpackFramwork.accept_offer')
@patch('restclient.HpcRestClient.get_grow_decision')
def test_no_need_to_grow(self, mock_get_grow_decision, mock_accept_offer, mock_decline_offer):
mock_get_grow_decision.return_value = MagicMock(cores_to_grow=0)
offer = create_mock_mesos_offer(4.0, 4.0, True, "host1")
offers = [offer]
self.hpcpackFramework.offer_received(offers)
mock_accept_offer.assert_not_called()
mock_decline_offer.assert_called_with(offer)
@patch('hpcframework.HpcpackFramwork.decline_offer')
@patch('hpcframework.HpcpackFramwork.accept_offer')
@patch('restclient.HpcRestClient.get_grow_decision')
def test_accept_partial_offer(self, mock_get_grow_decision, mock_accept_offer, mock_decline_offer):
mock_get_grow_decision.return_value = MagicMock(cores_to_grow=2)
offer1 = create_mock_mesos_offer(1.0, 1.0, True, "host1")
offer2 = create_mock_mesos_offer(1.0, 1.0, True, "host2")
offer3 = create_mock_mesos_offer(1.0, 1.0, True, "host3")
offers = [offer1, offer2, offer3]
self.hpcpackFramework.offer_received(offers)
calls = [call(offer1), call(offer2)]
mock_accept_offer.assert_has_calls(calls)
mock_decline_offer.assert_called_with(offer3)
@patch('hpc_cluster_manager.HpcClusterManager.get_cores_in_provisioning')
@patch('hpcframework.HpcpackFramwork.decline_offer')
@patch('hpcframework.HpcpackFramwork.accept_offer')
@patch('restclient.HpcRestClient.get_grow_decision')
def test_accept_offer_with_provisioning(self, mock_get_grow_decision, mock_accept_offer, mock_decline_offer,
mock_get_cores_in_provisioning):
mock_get_grow_decision.return_value = MagicMock(cores_to_grow=5)
mock_get_cores_in_provisioning.return_value = 1
offer1 = create_mock_mesos_offer(1.0, 1.0, True, "host1")
offer2 = create_mock_mesos_offer(1.0, 1.0, True, "host2")
offer3 = create_mock_mesos_offer(1.0, 1.0, True, "host3")
offers = [offer1, offer2, offer3]
self.hpcpackFramework.offer_received(offers)
calls = [call(offer1), call(offer2), call(offer3)]
mock_accept_offer.assert_has_calls(calls)
mock_decline_offer.assert_not_called()
@patch('hpc_cluster_manager.HpcClusterManager.get_cores_in_provisioning')
@patch('hpcframework.HpcpackFramwork.decline_offer')
@patch('hpcframework.HpcpackFramwork.accept_offer')
@patch('restclient.HpcRestClient.get_grow_decision')
def test_accept_partial_offer_with_provisioning(self, mock_get_grow_decision, mock_accept_offer, mock_decline_offer,
mock_get_cores_in_provisioning):
mock_get_grow_decision.return_value = MagicMock(cores_to_grow=2)
mock_get_cores_in_provisioning.return_value = 1
offer1 = create_mock_mesos_offer(1.0, 1.0, True, "host1")
offer2 = create_mock_mesos_offer(1.0, 1.0, True, "host2")
offer3 = create_mock_mesos_offer(1.0, 1.0, True, "host3")
offers = [offer1, offer2, offer3]
self.hpcpackFramework.offer_received(offers)
calls = [call(offer2), call(offer3)]
mock_accept_offer.assert_called_with(offer1)
mock_decline_offer.assert_has_calls(calls)
@patch('hpc_cluster_manager.HpcClusterManager.check_fqdn_collision')
@patch('hpc_cluster_manager.HpcClusterManager.get_cores_in_provisioning')
@patch('hpcframework.HpcpackFramwork.decline_offer')
@patch('hpcframework.HpcpackFramwork.accept_offer')
@patch('restclient.HpcRestClient.get_grow_decision')
    def test_decline_offer_on_fqdn_collision(self, mock_get_grow_decision, mock_accept_offer, mock_decline_offer,
                                             mock_get_cores_in_provisioning, mock_check_fqdn_collision):
mock_get_grow_decision.return_value = MagicMock(cores_to_grow=2)
mock_get_cores_in_provisioning.return_value = 0
mock_check_fqdn_collision.return_value = True
offer1 = create_mock_mesos_offer(1.0, 1.0, True, "host1")
offers = [offer1]
self.hpcpackFramework.offer_received(offers)
mock_accept_offer.assert_not_called()
mock_decline_offer.assert_called_with(offer1)
@patch('hpcframework.HpcpackFramwork.decline_offer')
@patch('hpcframework.HpcpackFramwork.accept_offer')
@patch('restclient.HpcRestClient.get_grow_decision')
def test_decline_non_dedicated_offer(self, mock_get_grow_decision, mock_accept_offer, mock_decline_offer):
mock_get_grow_decision.return_value = MagicMock(cores_to_grow=1)
offer = create_mock_mesos_offer(4.0, 5.0, True, "host1")
offers = [offer]
self.hpcpackFramework.offer_received(offers)
mock_accept_offer.assert_not_called()
mock_decline_offer.assert_called_with(offer)
if __name__ == '__main__':
unittest.main()
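# Note on the @patch stacks above: mock applies decorators bottom-up, so the
# decorator nearest the function supplies the first mock argument. That is why
# get_grow_decision, listed last in each stack, arrives first in each signature.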
| 43.360248
| 120
| 0.683713
| 819
| 6,981
| 5.420024
| 0.114774
| 0.054517
| 0.070962
| 0.072088
| 0.852444
| 0.828565
| 0.815274
| 0.806263
| 0.795675
| 0.795675
| 0
| 0.019022
| 0.209282
| 6,981
| 160
| 121
| 43.63125
| 0.785145
| 0
| 0
| 0.587413
| 0
| 0
| 0.26701
| 0.160722
| 0
| 0
| 0
| 0
| 0.097902
| 1
| 0.06993
| false
| 0
| 0.034965
| 0.006993
| 0.125874
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
33303b16e74d1aec44ae5a537aa51d8158fadf63
| 104
|
py
|
Python
|
pydynamo_brain/pydynamo_brain/ui/actions/__init__.py
|
ubcbraincircuits/pyDynamo
|
006eb6edb5e54670574dbfdf7d249e9037f01ffc
|
[
"MIT"
] | 4
|
2021-12-16T22:32:47.000Z
|
2022-01-03T05:42:12.000Z
|
pydynamo_brain/pydynamo_brain/ui/actions/__init__.py
|
padster/pyDynamo
|
006eb6edb5e54670574dbfdf7d249e9037f01ffc
|
[
"MIT"
] | 1
|
2021-11-15T18:14:20.000Z
|
2021-11-15T18:14:36.000Z
|
pydynamo_brain/pydynamo_brain/ui/actions/__init__.py
|
padster/pyDynamo
|
006eb6edb5e54670574dbfdf7d249e9037f01ffc
|
[
"MIT"
] | 1
|
2022-01-21T23:03:24.000Z
|
2022-01-21T23:03:24.000Z
|
from .fullStateActions import FullStateActions
from .dendriteCanvasActions import DendriteCanvasActions
| 34.666667
| 56
| 0.903846
| 8
| 104
| 11.75
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 104
| 2
| 57
| 52
| 0.979167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
333aab4e17029bab7fda872889f1a7117add2c69
| 326
|
py
|
Python
|
plugins/mcafee_epo/komand_mcafee_epo/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/mcafee_epo/komand_mcafee_epo/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/mcafee_epo/komand_mcafee_epo/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .add_permission_set_to_user.action import AddPermissionSetToUser
from .clear_tags.action import ClearTags
from .get_policies.action import GetPolicies
from .run_wake_up.action import RunWakeUp
from .search_agents.action import SearchAgents
from .tag_system.action import TagSystem
| 40.75
| 69
| 0.855828
| 47
| 326
| 5.723404
| 0.680851
| 0.267658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101227
| 326
| 7
| 70
| 46.571429
| 0.918089
| 0.113497
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
334e78d2ac8d951d0d117796a849e5e49f787bdd
| 87
|
py
|
Python
|
tributary/lazy/calculations/__init__.py
|
vishalbelsare/tributary
|
ab1a75eea50e92cbff2aa1b3d4e576cb25bc20e3
|
[
"Apache-2.0"
] | 1
|
2022-03-23T10:50:42.000Z
|
2022-03-23T10:50:42.000Z
|
tributary/lazy/calculations/__init__.py
|
vishalbelsare/tributary
|
ab1a75eea50e92cbff2aa1b3d4e576cb25bc20e3
|
[
"Apache-2.0"
] | null | null | null |
tributary/lazy/calculations/__init__.py
|
vishalbelsare/tributary
|
ab1a75eea50e92cbff2aa1b3d4e576cb25bc20e3
|
[
"Apache-2.0"
] | null | null | null |
from .finance import *
from .ops import *
from .rolling import *
from .basket import *
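# Aggregator module: each star import re-exports its submodule's public names
# (everything listed in that submodule's __all__, or all non-underscore names
# when __all__ is absent).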
| 17.4
| 22
| 0.724138
| 12
| 87
| 5.25
| 0.5
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183908
| 87
| 4
| 23
| 21.75
| 0.887324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
335e45555ad860b91cab1d9b0f14a0fa4cc15cdf
| 7,159
|
py
|
Python
|
tests/RejectPipeline.py
|
lbausch/filebeat-exim4
|
fb58f3f438cb2689a0750be78cd52cc6412b4c47
|
[
"MIT"
] | null | null | null |
tests/RejectPipeline.py
|
lbausch/filebeat-exim4
|
fb58f3f438cb2689a0750be78cd52cc6412b4c47
|
[
"MIT"
] | null | null | null |
tests/RejectPipeline.py
|
lbausch/filebeat-exim4
|
fb58f3f438cb2689a0750be78cd52cc6412b4c47
|
[
"MIT"
] | null | null | null |
import unittest
import BaseTestCase
class RejectPipeline(BaseTestCase.BaseTestCase):
    """Tests for the exim4 reject-log ingest pipeline, using sample log lines."""
    pipeline_file = '../module/exim4/reject/ingest/pipeline.json'
def test_pipeline(self):
message = 'foo'
response = self.request(message)
source = self.source(response)
self.assertSourceEquals(source, {
'message': message,
'error': {
'message': 'Provided Grok expressions do not match field value: [foo]'
}
})
def test_greylisting(self):
message = "2021-05-04 13:37:00 +0100 H=mail.remotehost.tld [123.123.123.123]:1337 X=TLSv1.2:ECDHE-RSA-AES128-GCM-SHA256:128 CV=no F=<mail@sender.tld> temporarily rejected RCPT <mail@recipient.tld>: Deferred due to greylisting. Host: '123.123.123.123' From: 'mail@sender.tld' To: 'mail@recipient.tld' SPF: 'none'"
response = self.request(message)
source = self.source(response)
self.assertSourceEquals(source, {
'@timestamp': '2021-05-04T13:37:00.000+01:00',
'exim4': {
'message_raw': message,
'remote_host': 'mail.remotehost.tld',
'remote_addr': '123.123.123.123',
'remote_addr_port': '1337',
'tls': {
'cipher_suite': 'TLSv1.2:ECDHE-RSA-AES128-GCM-SHA256:128',
'cert_verification_status': 'no',
},
'sender_address': 'mail@sender.tld',
'recipient_address': 'mail@recipient.tld',
'message': "Deferred due to greylisting. Host: '123.123.123.123' From: 'mail@sender.tld' To: 'mail@recipient.tld' SPF: 'none'",
},
})
def test_greylisting_without_cipher_suite(self):
message = "2021-05-04 13:37:00 +0100 H=(mail.remotehost.tld) [123.123.123.123]:1337 F=<mail@sender.tld> temporarily rejected RCPT <mail@recipient.tld>: Deferred due to greylisting. Host: '123.123.123.123' From: 'mail@sender.tld' To: 'mail@recipient.tld' SPF: 'neutral'"
response = self.request(message)
source = self.source(response)
self.assertSourceEquals(source, {
'@timestamp': '2021-05-04T13:37:00.000+01:00',
'exim4': {
'message_raw': message,
'remote_host': 'mail.remotehost.tld',
'remote_addr': '123.123.123.123',
'remote_addr_port': '1337',
'sender_address': 'mail@sender.tld',
'recipient_address': 'mail@recipient.tld',
'message': "Deferred due to greylisting. Host: '123.123.123.123' From: 'mail@sender.tld' To: 'mail@recipient.tld' SPF: 'neutral'",
},
})
def test_rbl(self):
message = '2021-05-04 13:37:00 +0100 H=(mail.remotehost.tld) [123.123.123.123]:1337 F=<mail@sender.tld> rejected RCPT <mail@recipient.tld>: "JunkMail rejected - (mail.remotehost.tld) [123.123.123.123]:1337 is in an RBL (rbl.tld), see [https://rbl.tld]"'
response = self.request(message)
source = self.source(response)
self.assertSourceEquals(source, {
'@timestamp': '2021-05-04T13:37:00.000+01:00',
'exim4': {
'message_raw': message,
'remote_host': 'mail.remotehost.tld',
'remote_addr': '123.123.123.123',
'remote_addr_port': '1337',
'sender_address': 'mail@sender.tld',
'recipient_address': 'mail@recipient.tld',
'message': '"JunkMail rejected - (mail.remotehost.tld) [123.123.123.123]:1337 is in an RBL (rbl.tld), see [https://rbl.tld]"',
},
})
def test_spf(self):
message = '2021-05-04 13:37:00 +0100 H=mail.remotehost.tld [123.123.123.123]:1337 F=<mail@sender.tld> rejected RCPT <mail@recipient.tld>: SPF: 123.123.123.123 is not allowed to send mail from sender.tld'
response = self.request(message)
source = self.source(response)
self.assertSourceEquals(source, {
'@timestamp': '2021-05-04T13:37:00.000+01:00',
'exim4': {
'message_raw': message,
'remote_host': 'mail.remotehost.tld',
'remote_addr': '123.123.123.123',
'remote_addr_port': '1337',
'sender_address': 'mail@sender.tld',
'recipient_address': 'mail@recipient.tld',
'message': 'SPF: 123.123.123.123 is not allowed to send mail from sender.tld',
},
})
def test_no_such_user_here(self):
message = '2021-05-04 13:37:00 +0100 H=mail.remotehost.tld [123.123.123.123]:1337 F=<mail@sender.tld> rejected RCPT <mail@recipient.tld>: No Such User Here"'
response = self.request(message)
source = self.source(response)
self.assertSourceEquals(source, {
'@timestamp': '2021-05-04T13:37:00.000+01:00',
'exim4': {
'message_raw': message,
'remote_host': 'mail.remotehost.tld',
'remote_addr': '123.123.123.123',
'remote_addr_port': '1337',
'sender_address': 'mail@sender.tld',
'recipient_address': 'mail@recipient.tld',
'message': 'No Such User Here"',
},
})
def test_rejected_junk_mail(self):
message = '2021-05-04 13:37:00 +0100 H=(mail.remotehost.tld) [123.123.123.123]:1337 F=<mail@sender.tld> rejected RCPT <mail@recipient.tld>: "JunkMail rejected - (mail.remotehost.tld) [123.123.123.123]:1337 is in an RBL (rbl.tld), see https://rbl.tld"'
response = self.request(message)
source = self.source(response)
self.assertSourceEquals(source, {
'@timestamp': '2021-05-04T13:37:00.000+01:00',
'exim4': {
'message_raw': message,
'remote_host': 'mail.remotehost.tld',
'remote_addr': '123.123.123.123',
'remote_addr_port': '1337',
'sender_address': 'mail@sender.tld',
'recipient_address': 'mail@recipient.tld',
'message': '"JunkMail rejected - (mail.remotehost.tld) [123.123.123.123]:1337 is in an RBL (rbl.tld), see https://rbl.tld"',
},
})
def test_dropped_syntax_errors(self):
message = '2021-05-04 13:37:00 +0100 SMTP call from (mail.remotehost.tld) [123.123.123.123]:1337 dropped: too many syntax or protocol errors (last command was "RCPT TO: <\'mail@recipient.tld\'>", C=EHLO,AUTH,MAIL,RCPT,RCPT,RCPT,RCPT,RCPT,RCPT)'
response = self.request(message)
source = self.source(response)
self.assertSourceEquals(source, {
'@timestamp': '2021-05-04T13:37:00.000+01:00',
'exim4': {
'message_raw': message,
'message': 'SMTP call from (mail.remotehost.tld) [123.123.123.123]:1337 dropped: too many syntax or protocol errors (last command was "RCPT TO: <\'mail@recipient.tld\'>", C=EHLO,AUTH,MAIL,RCPT,RCPT,RCPT,RCPT,RCPT,RCPT)',
},
})
if __name__ == '__main__':
unittest.main()
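# Editorial sketch: the host/address/port fields these tests assert on can be
# pulled from a reject line with a regex like the one below. This is only an
# illustration; it is not the module's actual Grok expression.
import re

REJECT_RE = re.compile(
    r"^(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [+-]\d{4}) "
    r"H=\(?(?P<remote_host>[^ )]+)\)? \[(?P<remote_addr>[^\]]+)\]:(?P<remote_addr_port>\d+)"
)
_m = REJECT_RE.match(
    "2021-05-04 13:37:00 +0100 H=mail.remotehost.tld [123.123.123.123]:1337 rejected"
)
assert _m is not None and _m.group("remote_addr_port") == "1337"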
| 45.310127
| 320
| 0.576337
| 870
| 7,159
| 4.658621
| 0.133333
| 0.106588
| 0.106588
| 0.071058
| 0.888231
| 0.888231
| 0.888231
| 0.888231
| 0.87244
| 0.865285
| 0
| 0.127329
| 0.272664
| 7,159
| 157
| 321
| 45.598726
| 0.651047
| 0
| 0
| 0.653543
| 0
| 0.110236
| 0.529124
| 0.152535
| 0
| 0
| 0
| 0
| 0.062992
| 1
| 0.062992
| false
| 0
| 0.015748
| 0
| 0.094488
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
336aba3f24409a56531a841bffd0111c287954bc
| 455
|
py
|
Python
|
autotf/tuner/fmin/__init__.py
|
DAIM-ML/autotf
|
3f82d858f49c27d5ecb624cee555fb8fd47bf067
|
[
"BSD-3-Clause"
] | 8
|
2018-03-07T06:58:16.000Z
|
2019-01-30T07:49:44.000Z
|
autotf/tuner/fmin/__init__.py
|
DAIM-ML/autotf
|
3f82d858f49c27d5ecb624cee555fb8fd47bf067
|
[
"BSD-3-Clause"
] | null | null | null |
autotf/tuner/fmin/__init__.py
|
DAIM-ML/autotf
|
3f82d858f49c27d5ecb624cee555fb8fd47bf067
|
[
"BSD-3-Clause"
] | 1
|
2018-03-31T09:06:12.000Z
|
2018-03-31T09:06:12.000Z
|
# Each optimizer below is imported optionally, so a missing backend dependency
# disables just that optimizer instead of breaking the whole package.
try:
from .bayesian_optimization import bayesian_optimization
except ImportError:
pass
try:
from .random_search import random_search
except ImportError:
pass
try:
from .fabolas import fabolas
except ImportError:
pass
try:
from .mtbo import mtbo
except ImportError:
pass
try:
from .bohamiann import bohamiann
except ImportError:
pass
try:
from .entropy_search import entropy_search
except ImportError:
pass
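# The same optional-import pattern, factored into a helper. This is a sketch of
# an alternative shape, not autotf's code; autotf deliberately repeats the
# try/except blocks so each optimizer fails independently.
import importlib

def _optional_import(module, name):
    # Return the named attribute from module, or None if either the module or
    # the attribute is unavailable.
    try:
        return getattr(importlib.import_module(module), name)
    except (ImportError, AttributeError):
        return None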
| 17.5
| 60
| 0.747253
| 54
| 455
| 6.185185
| 0.259259
| 0.125749
| 0.377246
| 0.359281
| 0.419162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213187
| 455
| 25
| 61
| 18.2
| 0.932961
| 0
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
682859dba7a14a9673205c231589735f18f17cb0
| 5,431
|
py
|
Python
|
icepickle/pipeline.py
|
koaning/icepickle
|
f6692e334ceebe8390d8b4960a56eb661236edd3
|
[
"MIT"
] | 8
|
2022-02-14T20:20:30.000Z
|
2022-03-08T10:03:13.000Z
|
icepickle/pipeline.py
|
koaning/icepickle
|
f6692e334ceebe8390d8b4960a56eb661236edd3
|
[
"MIT"
] | 1
|
2022-02-20T08:40:42.000Z
|
2022-02-20T15:02:58.000Z
|
icepickle/pipeline.py
|
koaning/icepickle
|
f6692e334ceebe8390d8b4960a56eb661236edd3
|
[
"MIT"
] | null | null | null |
from sklearn.pipeline import Pipeline, FeatureUnion, _name_estimators
class PartialFeatureUnion(FeatureUnion):
"""
A `PartialFeatureUnion` is a `FeatureUnion` but able to `.partial_fit`.
Arguments:
transformer_list: a list of transformers to apply and concatenate
Example:
```python
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import HashingVectorizer
from icepickle.pipeline import PartialPipeline, PartialFeatureUnion
pipe = PartialPipeline([
("feat", PartialFeatureUnion([
("hash1", HashingVectorizer()),
("hash2", HashingVectorizer(ngram_range=(1,2)))
])),
("clf", SGDClassifier())
])
X = [
"i really like this post",
"thanks for that comment",
"i enjoy this friendly forum",
"this is a bad post",
"i dislike this article",
"this is not well written"
]
y = np.array([1, 1, 1, 0, 0, 0])
for loop in range(3):
pipe.partial_fit(X, y, classes=[0, 1])
assert np.all(pipe.predict(X) == np.array([1, 1, 1, 0, 0, 0]))
```
"""
def partial_fit(self, X, y=None, classes=None, **kwargs):
"""
Fits the components, but allow for batches.
"""
for name, step in self.transformer_list:
if not hasattr(step, "partial_fit"):
raise ValueError(
f"Step {name} is a {step} which does not have `.partial_fit` implemented."
)
for name, step in self.transformer_list:
if hasattr(step, "predict"):
step.partial_fit(X, y, classes=classes, **kwargs)
else:
step.partial_fit(X, y)
return self
def make_partial_union(*transformer_list):
"""
Utility function to generate a `PartialFeatureUnion`
Arguments:
transformer_list: a list of transformers to apply and concatenate
Example:
```python
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import HashingVectorizer
from icepickle.pipeline import make_partial_pipeline, make_partial_union
pipe = make_partial_pipeline(
make_partial_union(
HashingVectorizer(),
HashingVectorizer(ngram_range=(1,2))
),
SGDClassifier()
)
X = [
"i really like this post",
"thanks for that comment",
"i enjoy this friendly forum",
"this is a bad post",
"i dislike this article",
"this is not well written"
]
y = np.array([1, 1, 1, 0, 0, 0])
for loop in range(3):
pipe.partial_fit(X, y, classes=[0, 1])
assert np.all(pipe.predict(X) == np.array([1, 1, 1, 0, 0, 0]))
```
"""
return PartialFeatureUnion(_name_estimators(transformer_list))
class PartialPipeline(Pipeline):
"""
Utility function to generate a `PartialPipeline`
Arguments:
steps: a collection of text-transformers
```python
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import HashingVectorizer
from icepickle.pipeline import PartialPipeline
pipe = PartialPipeline([
("hash", HashingVectorizer()),
("clf", SGDClassifier())
])
X = [
"i really like this post",
"thanks for that comment",
"i enjoy this friendly forum",
"this is a bad post",
"i dislike this article",
"this is not well written"
]
y = np.array([1, 1, 1, 0, 0, 0])
for loop in range(3):
pipe.partial_fit(X, y, classes=[0, 1])
assert np.all(pipe.predict(X) == np.array([1, 1, 1, 0, 0, 0]))
```
"""
def partial_fit(self, X, y=None, classes=None, **kwargs):
"""
Fits the components, but allow for batches.
"""
for name, step in self.steps:
if not hasattr(step, "partial_fit"):
raise ValueError(
f"Step {name} is a {step} which does not have `.partial_fit` implemented."
)
for name, step in self.steps:
if hasattr(step, "predict"):
step.partial_fit(X, y, classes=classes, **kwargs)
else:
step.partial_fit(X, y)
if hasattr(step, "transform"):
X = step.transform(X)
return self
def make_partial_pipeline(*steps):
"""
Utility function to generate a `PartialPipeline`
Arguments:
steps: a collection of text-transformers
```python
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import HashingVectorizer
from icepickle.pipeline import make_partial_pipeline
pipe = make_partial_pipeline(
HashingVectorizer(),
SGDClassifier()
)
X = [
"i really like this post",
"thanks for that comment",
"i enjoy this friendly forum",
"this is a bad post",
"i dislike this article",
"this is not well written"
]
y = np.array([1, 1, 1, 0, 0, 0])
for loop in range(3):
pipe.partial_fit(X, y, classes=[0, 1])
assert np.all(pipe.predict(X) == np.array([1, 1, 1, 0, 0, 0]))
```
"""
return PartialPipeline(_name_estimators(steps))
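# Design note: make_partial_pipeline and make_partial_union lean on
# scikit-learn's _name_estimators helper to auto-name steps from their class
# names ("hashingvectorizer", "sgdclassifier", ...), mirroring what
# sklearn.pipeline.make_pipeline does for ordinary pipelines.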
| 27.155
| 94
| 0.592709
| 652
| 5,431
| 4.855828
| 0.162577
| 0.010107
| 0.020215
| 0.022742
| 0.830385
| 0.788692
| 0.772584
| 0.769425
| 0.759949
| 0.759949
| 0
| 0.017359
| 0.299945
| 5,431
| 199
| 95
| 27.291457
| 0.81536
| 0.622353
| 0
| 0.666667
| 0
| 0
| 0.122382
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.030303
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6875f266eae6972c56d9b5125cb882d8d4d04e2d
| 129
|
py
|
Python
|
idfy_sdk/services/merchantsign/__init__.py
|
idfy-io/idfy-sdk-python
|
0f7ced0cf0df080b1c73e2451bf02a23710b5bf1
|
[
"Apache-2.0"
] | null | null | null |
idfy_sdk/services/merchantsign/__init__.py
|
idfy-io/idfy-sdk-python
|
0f7ced0cf0df080b1c73e2451bf02a23710b5bf1
|
[
"Apache-2.0"
] | null | null | null |
idfy_sdk/services/merchantsign/__init__.py
|
idfy-io/idfy-sdk-python
|
0f7ced0cf0df080b1c73e2451bf02a23710b5bf1
|
[
"Apache-2.0"
] | null | null | null |
from idfy_sdk.services.merchantsign.merchant_sign_service import MerchantSignService
import idfy_sdk.services.merchantsign.models
| 64.5
| 84
| 0.914729
| 16
| 129
| 7.125
| 0.6875
| 0.122807
| 0.263158
| 0.473684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03876
| 129
| 2
| 85
| 64.5
| 0.919355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
68826a5840025710d5af93a3fce5ecd5222c3806
| 46,764
|
py
|
Python
|
tests/modules/voting/test_voting.py
|
rlin0/donut
|
5672df8e853b4b775d7d50665128b255cd695ec2
|
[
"MIT"
] | null | null | null |
tests/modules/voting/test_voting.py
|
rlin0/donut
|
5672df8e853b4b775d7d50665128b255cd695ec2
|
[
"MIT"
] | null | null | null |
tests/modules/voting/test_voting.py
|
rlin0/donut
|
5672df8e853b4b775d7d50665128b255cd695ec2
|
[
"MIT"
] | null | null | null |
"""
Tests donut/modules/voting
"""
from datetime import date, datetime, timedelta
import json
import re
import flask
import pytest
from donut.testing.fixtures import client
from donut import app
from donut.modules.groups.helpers import get_group_list_data
from donut.modules.voting import helpers, routes, ranked_pairs
# Ranked pairs
def test_ranked_pairs():
# Example taken from en.wikipedia.org/wiki/Ranked_pairs
M = 'Memphis'
N = 'Nashville'
C = 'Chattanooga'
K = 'Knoxville'
responses = (((M, ), (N, ), (C, ), (K, )), ) * 42
responses += (((N, ), (C, ), (K, ), (M, )), ) * 26
responses += (((C, ), (K, ), (N, ), (M, )), ) * 15
responses += (((K, ), (C, ), (N, ), (M, )), ) * 17
results = ranked_pairs.results(responses)
assert results.winners == [N, C, K, M]
assert results.tallies == {
(C, K): 42 + 26 + 15,
(C, M): 26 + 15 + 17,
(C, N): 15 + 17,
(K, C): 17,
(K, M): 26 + 15 + 17,
(K, N): 15 + 17,
(M, C): 42,
(M, K): 42,
(M, N): 42,
(N, C): 42 + 26,
(N, K): 42 + 26,
(N, M): 26 + 15 + 17,
}
# Test incomplete lists
results = ranked_pairs.results([[['A']], [['B']], [['A']]])
assert results.winners == ['A', 'B']
# Test ties
responses = [[['A'], ['B', 'C'], ['D']], [['A', 'C'], ['B', 'D']]]
assert ranked_pairs.results(responses).winners == ['A', 'C', 'B', 'D']
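# A minimal reference sketch of the ranked-pairs (Tideman) procedure exercised
# above: tally pairwise margins, lock the strongest majorities first, skip any
# pair that would close a cycle, and read the winner off the resulting graph.
# This is an editorial illustration, not donut's ranked_pairs module.
from itertools import combinations

def _ranked_pairs_winner(rankings, candidates):
    tallies = {}
    for ranking in rankings:  # ranking: list of tiers, best first
        position = {c: i for i, tier in enumerate(ranking) for c in tier}
        unranked = len(ranking)  # unranked candidates count as worst
        for a, b in combinations(candidates, 2):
            pa, pb = position.get(a, unranked), position.get(b, unranked)
            if pa < pb:
                tallies[(a, b)] = tallies.get((a, b), 0) + 1
            elif pb < pa:
                tallies[(b, a)] = tallies.get((b, a), 0) + 1
    locked = set()

    def reaches(src, dst):
        # Depth-first search over locked edges.
        stack, seen = [src], set()
        while stack:
            node = stack.pop()
            if node == dst:
                return True
            if node not in seen:
                seen.add(node)
                stack.extend(y for (x, y) in locked if x == node)
        return False

    for (a, b), votes in sorted(tallies.items(), key=lambda kv: -kv[1]):
        if votes > tallies.get((b, a), 0) and not reaches(b, a):
            locked.add((a, b))  # lock "a beats b" unless it would close a cycle
    # The winner is the candidate with no locked edge pointing at it.
    return next(c for c in candidates if all(y != c for (_, y) in locked))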
# Helpers
def test_question_types(client):
assert helpers.get_question_types() == {
'Dropdown': 1,
'Checkboxes': 2,
'Elected position': 3,
'Short text': 4,
'Long text': 5
}
def test_public_surveys(client):
ruddock_id = get_group_list_data(
['group_id'], {'group_name': 'Ruddock House'})[0]['group_id']
survey_params = [
{
'title': 'Unrestricted',
'group': '',
'end_hour': '12'
},
{
'title': 'Ruddock only',
'group': str(ruddock_id),
'end_hour': '1' # ends later
}
]
yesterday = date.today() + timedelta(days=-1)
tomorrow = date.today() + timedelta(days=1)
access_keys = {}
with client.session_transaction() as sess:
sess['username'] = 'csander'
for params in survey_params:
rv = client.post(
flask.url_for('voting.make_survey'),
data=dict(
description='',
start_date=yesterday.strftime(helpers.YYYY_MM_DD),
start_hour='12',
start_minute='00',
start_period='P',
end_date=tomorrow.strftime(helpers.YYYY_MM_DD),
end_minute='00',
end_period='P',
public='on',
**params),
follow_redirects=False)
assert rv.status_code == 302
access_keys[params['title']] = [
url_piece for url_piece in rv.location.split('/')
if len(url_piece) == 64
][0]
unrestricted = {
'title': 'Unrestricted',
'description': None,
'end_time': datetime(tomorrow.year, tomorrow.month, tomorrow.day, 12),
'access_key': access_keys['Unrestricted'],
'group_id': None
}
assert list(helpers.get_visible_surveys(helpers.get_user_id(
'dqu'))) == [ # not a Rudd
unrestricted
]
assert list(
helpers.get_visible_surveys(helpers.get_user_id('csander'))) == [
unrestricted, {
'title':
'Ruddock only',
'description':
None,
'end_time':
datetime(tomorrow.year, tomorrow.month, tomorrow.day, 13),
'access_key':
access_keys['Ruddock only'],
'group_id':
2
}
]
def test_closed_surveys(client):
yesterday = date.today() + timedelta(days=-1)
tomorrow = date.today() + timedelta(days=1)
survey_params = [{
'title': 'Before',
'start_date': yesterday.strftime(helpers.YYYY_MM_DD),
'start_hour': '2',
'end_date': yesterday.strftime(helpers.YYYY_MM_DD),
'end_hour': '3'
}, {
'title': 'During',
'start_date': yesterday.strftime(helpers.YYYY_MM_DD),
'start_hour': '4',
'end_date': tomorrow.strftime(helpers.YYYY_MM_DD),
'end_hour': '5'
}, {
'title': 'After',
'start_date': tomorrow.strftime(helpers.YYYY_MM_DD),
'start_hour': '2',
'end_date': tomorrow.strftime(helpers.YYYY_MM_DD),
'end_hour': '3'
}]
access_keys = {}
with client.session_transaction() as sess:
sess['username'] = 'csander'
for params in survey_params:
rv = client.post(
flask.url_for('voting.make_survey'),
data=dict(
description='',
start_minute='00',
start_period='P',
end_minute='00',
end_period='P',
public='on',
group='',
**params),
follow_redirects=False)
assert rv.status_code == 302
access_keys[params['title']] = [
url_piece for url_piece in rv.location.split('/')
if len(url_piece) == 64
][0]
assert helpers.get_closed_surveys(helpers.get_user_id('reng')) == (
) # not the creator of 'Before'
before = [{
'title':
'Before',
'description':
None,
'end_time':
datetime(yesterday.year, yesterday.month, yesterday.day, 15),
'access_key':
access_keys['Before'],
'results_shown':
0
}]
assert helpers.get_closed_surveys(helpers.get_user_id('csander')) == before
rv = client.get(
flask.url_for(
'voting.release_results', access_key=access_keys['Before']))
assert rv.status_code == 302
assert rv.location == flask.url_for(
'voting.show_results', access_key=access_keys['Before'])
before[0]['results_shown'] = 1
assert helpers.get_closed_surveys(helpers.get_user_id('reng')) == before
assert helpers.get_closed_surveys(helpers.get_user_id('csander')) == before
helpers.delete_survey(3)
helpers.delete_survey(4)
helpers.delete_survey(5)
def test_survey_data(client):
access_key = list(helpers.get_visible_surveys(1))[0]['access_key']
yesterday = date.today() + timedelta(days=-1)
tomorrow = date.today() + timedelta(days=1)
assert helpers.get_survey_data(access_key) == {
'survey_id':
1,
'title':
'Unrestricted',
'description':
None,
'group_id':
None,
'start_time':
datetime(yesterday.year, yesterday.month, yesterday.day, 12),
'end_time':
datetime(tomorrow.year, tomorrow.month, tomorrow.day, 12),
'creator':
3,
'results_shown':
0
}
def test_question_json(client):
question_types = helpers.get_question_types()
helpers.set_questions(1, [{
'title': 'A',
'description': '',
'type': question_types['Dropdown'],
'choices': ['1', '2', '3']
}, {
'title': 'B',
'description': 'bbb',
'type': question_types['Short text']
}, {
'title': 'C',
'description': 'ccc',
'type': question_types['Checkboxes'],
'choices': ['a', 'b', 'c']
}, {
'title': 'D',
'description': '',
'type': question_types['Long text']
}, {
'title': 'E',
'description': '',
'type': question_types['Elected position'],
'choices': ['do', 're', 'me']
}])
assert helpers.get_questions_json(
1, False
) == '[{"title":"A","description":"","type":1,"choices":["1","2","3"]},{"title":"B","description":"bbb","type":4},{"title":"C","description":"ccc","type":2,"choices":["a","b","c"]},{"title":"D","description":"","type":5},{"title":"E","description":"","type":3,"choices":["do","re","me"]}]'
assert helpers.get_questions_json(
1, True
) == '[{"question_id":1,"title":"A","description":"","type":1,"choices":[{"id":1,"choice":"1"},{"id":2,"choice":"2"},{"id":3,"choice":"3"}]},{"question_id":2,"title":"B","description":"bbb","type":4},{"question_id":3,"title":"C","description":"ccc","type":2,"choices":[{"id":4,"choice":"a"},{"id":5,"choice":"b"},{"id":6,"choice":"c"}]},{"question_id":4,"title":"D","description":"","type":5},{"question_id":5,"title":"E","description":"","type":3,"choices":[{"id":7,"choice":"do"},{"id":8,"choice":"re"},{"id":9,"choice":"me"}]}]'
def test_question_ids(client):
assert helpers.get_question_ids(1) == [1, 2, 3, 4, 5]
assert helpers.get_question_ids(2) == []
def test_question_type(client):
assert list(map(helpers.get_question_type, range(1, 6))) == [1, 4, 2, 5, 3]
def test_get_choice(client):
assert [
helpers.invalid_choice_id(5, choice)
for choice in ['abc', 7, 8, 9, 10]
] == [True, False, False, False, True]
def test_process_params_error(client):
default_params = dict(
title='New survey',
description='',
start_date='2018-05-08',
start_hour='12',
start_minute='00',
start_period='P',
end_date='2018-05-10',
end_hour='12',
end_minute='00',
end_period='P',
public='on',
group='')
def assert_message(message, params):
rv = client.post(
flask.url_for('voting.make_survey'),
data=params,
follow_redirects=False)
assert rv.status_code == 200
assert message in rv.data
rv = client.post(
flask.url_for('voting.make_survey'), follow_redirects=False)
assert rv.status_code == 403
with client.session_transaction() as sess:
sess['username'] = 'csander'
for delete_param in default_params:
if delete_param == 'public': continue # this param is optional
params = default_params.copy()
del params[delete_param]
assert_message(b'Invalid form data', params)
for date_field in ['start_date', 'end_date']:
assert_message(b'Invalid form data', {
**default_params, date_field: '123'
})
for hour_field in ['start_hour', 'end_hour']:
assert_message(b'Invalid form data', {
**default_params, hour_field: 'abc'
})
assert_message(b'Invalid form data', {
**default_params, hour_field: '0'
})
assert_message(b'Invalid form data', {
**default_params, hour_field: '13'
})
for minute_field in ['start_minute', 'end_minute']:
assert_message(b'Invalid form data', {
**default_params, minute_field: 'abc'
})
assert_message(b'Invalid form data', {
**default_params, minute_field: '-1'
})
assert_message(b'Invalid form data', {
**default_params, minute_field: '60'
})
for period_field in ['start_period', 'end_period']:
assert_message(b'Invalid form data', {
**default_params, period_field: 'a'
})
assert_message(b'Invalid form data', {
**default_params, period_field: ''
})
assert_message(b'Invalid form data', {**default_params, 'group': 'a'})
assert_message(b'Start must be before end', {
**default_params, 'start_date': '2018-05-09',
'end_date': '2018-05-08'
})
rv = client.post(
flask.url_for('voting.make_survey'),
data=default_params,
follow_redirects=False)
assert rv.status_code == 302 # successful
helpers.delete_survey(6)
def test_survey_params(client):
yesterday = date.today() + timedelta(days=-1)
tomorrow = date.today() + timedelta(days=1)
assert helpers.get_survey_params(1) == {
'title':
'Unrestricted',
'description':
None,
'start_time':
datetime(yesterday.year, yesterday.month, yesterday.day, 12),
'end_time':
datetime(tomorrow.year, tomorrow.month, tomorrow.day, 12),
'group_id':
None,
'public':
1
}
helpers.update_survey_params(1, {'title': 'ABC', 'group_id': 2})
assert helpers.get_survey_params(1) == {
'title':
'ABC',
'description':
None,
'start_time':
datetime(yesterday.year, yesterday.month, yesterday.day, 12),
'end_time':
datetime(tomorrow.year, tomorrow.month, tomorrow.day, 12),
'group_id':
2,
'public':
1
}
def test_my_surveys(client):
yesterday = date.today() + timedelta(days=-1)
tomorrow = date.today() + timedelta(days=1)
assert helpers.get_my_surveys(helpers.get_user_id('dqu')) == ()
csander = helpers.get_user_id('csander')
assert helpers.get_my_surveys(csander) == [{
'title':
'ABC',
'description':
None,
'access_key':
list(helpers.get_visible_surveys(csander))[0]['access_key'],
'start_time':
datetime(yesterday.year, yesterday.month, yesterday.day, 12),
'unopened':
0,
'closed':
0,
'end_time':
datetime(tomorrow.year, tomorrow.month, tomorrow.day, 12)
}, {
'title':
'Ruddock only',
'description':
None,
'access_key':
list(helpers.get_visible_surveys(csander))[1]['access_key'],
'start_time':
datetime(yesterday.year, yesterday.month, yesterday.day, 12),
'unopened':
0,
'closed':
0,
'end_time':
datetime(tomorrow.year, tomorrow.month, tomorrow.day, 13)
}]
def test_respond(client):
assert not helpers.some_responses_for_survey(1)
with app.test_request_context():
flask.session['username'] = 'csander'
helpers.set_responses([1, 2, 3, 4, 5], [
'2', '"asdf"', '[4, 6]', '"Lorem ipsum dolor sit amet"',
'[[7], [-1], [9], [-2], [null]]'
])
assert helpers.some_responses_for_survey(1)
results = helpers.get_results(1)
election_result = results.pop()
assert results == [{
'question_id': 1,
'title': 'A',
'description': None,
'type': 1,
'list_order': 0,
'choices': {
1: '1',
2: '2',
3: '3'
},
'responses': [2],
'results': [(2, 1)]
}, {
'question_id': 2,
'title': 'B',
'description': 'bbb',
'type': 4,
'list_order': 1,
'choices': 0,
'responses': ['asdf'],
'results': [('asdf', 1)]
}, {
'question_id': 3,
'title': 'C',
'description': 'ccc',
'type': 2,
'list_order': 2,
'choices': {
4: 'a',
5: 'b',
6: 'c'
},
'responses': [[4, 6]],
'results': [(4, 1), (6, 1)]
}, {
'question_id': 4,
'title': 'D',
'description': None,
'type': 5,
'list_order': 3,
'choices': 0,
'responses': ['Lorem ipsum dolor sit amet'],
'results': [('Lorem ipsum dolor sit amet', 1)]
}]
results = election_result.pop('results')
assert election_result == {
'question_id': 5,
'title': 'E',
'description': None,
'type': 3,
'list_order': 4,
'choices': {
7: 'do',
8: 're',
9: 'me'
},
'responses': [[['do'], ['David Qu'], ['me'], ['Robert Eng'], ['NO']]]
}
assert results.winners == ['do', 'David Qu', 'me', 'Robert Eng', 'NO']
with app.test_request_context():
flask.session['username'] = 'dqu'
# Invalid elected position response
helpers.set_responses([5], ['[["abc"]]'])
with pytest.raises(Exception) as e:
helpers.get_results(1)
assert e.value.args == ('Unrecognized elected position vote', )
def test_restrict_access(client):
assert helpers.restrict_take_access(None) == 'Invalid access key'
yesterday = datetime.now() + timedelta(days=-1)
tomorrow = datetime.now() + timedelta(days=1)
assert helpers.restrict_take_access({
'start_time': yesterday,
'end_time': yesterday
}) == 'Survey is not currently accepting responses'
assert helpers.restrict_take_access({
'start_time': tomorrow,
'end_time': tomorrow
}) == 'Survey is not currently accepting responses'
with app.test_request_context():
assert helpers.restrict_take_access({
'start_time': yesterday,
'end_time': tomorrow
}) == 'Must be logged in to take survey'
with app.test_request_context():
flask.session['username'] = 'dqu'
assert helpers.restrict_take_access({
'start_time': yesterday,
'end_time': tomorrow,
'group_id': 2
}) == 'You do not belong to the group this survey is for'
with app.test_request_context():
flask.session['username'] = 'csander'
assert helpers.restrict_take_access({
'survey_id': 1,
'start_time': yesterday,
'end_time': tomorrow,
'group_id': 2
}) == 'Already completed'
with app.test_request_context():
flask.session['username'] = 'reng'
assert helpers.restrict_take_access({
'survey_id': 1,
'start_time': yesterday,
'end_time': tomorrow,
'group_id': 2
}) is None
# Routes
def test_home(client):
rv = client.get(flask.url_for('voting.list_surveys'))
assert rv.status_code == 200
assert b'Ruddock only' not in rv.data
with client.session_transaction() as sess:
sess['username'] = 'csander'
rv = client.get(flask.url_for('voting.list_surveys'))
assert rv.status_code == 200
assert b'Ruddock only' in rv.data
def test_take(client):
access_key = list(
helpers.get_visible_surveys(helpers.get_user_id('csander')))[1][
'access_key']
rv = client.get(flask.url_for('voting.take_survey', access_key=access_key))
assert rv.status_code == 200
assert b'Must be logged in to take survey' in rv.data
with client.session_transaction() as sess:
sess['username'] = 'csander'
helpers.set_questions(2, [{
'title': 'Question 1',
'description': '',
'type': helpers.get_question_types()['Long text']
}])
rv = client.get(flask.url_for('voting.take_survey', access_key=access_key))
assert rv.status_code == 200
assert b'Edit' in rv.data
assert b'Question 1' in rv.data
with client.session_transaction() as sess:
sess['username'] = 'reng'
rv = client.get(flask.url_for('voting.take_survey', access_key=access_key))
assert rv.status_code == 200
assert b'Edit' not in rv.data
assert b'Question 1' in rv.data
def test_make_form(client):
with client.session_transaction() as sess:
sess['username'] = 'ruddock_pres'
rv = client.get(flask.url_for('voting.make_survey_form'))
assert rv.status_code == 403
with client.session_transaction() as sess:
sess['username'] = 'csander'
rv = client.get(flask.url_for('voting.make_survey_form'))
assert rv.status_code == 200
assert b'Making new survey' in rv.data
def test_manage(client):
rv = client.get(flask.url_for('voting.my_surveys'))
assert rv.status_code == 200
assert b'Please log in to manage your surveys' in rv.data
with client.session_transaction() as sess:
sess['username'] = 'ruddock_pres'
rv = client.get(flask.url_for('voting.my_surveys'))
assert rv.status_code == 403
with client.session_transaction() as sess:
sess['username'] = 'csander'
rv = client.get(flask.url_for('voting.my_surveys'))
assert rv.status_code == 200
assert b'ABC' in rv.data and b'Ruddock only' in rv.data
def test_edit_questions(client):
# Test all restrictions
rv = client.get(
flask.url_for(
'voting.edit_questions', access_key='invalid-access-key'))
assert rv.status_code == 403
with client.session_transaction() as sess:
sess['username'] = 'csander'
rv = client.get(
flask.url_for(
'voting.edit_questions', access_key='invalid-access-key'))
assert rv.status_code == 200
assert b'Invalid access key' in rv.data
assert b'Editing survey questions' not in rv.data
with client.session_transaction() as sess:
sess['username'] = 'reng'
access_key = list(
helpers.get_visible_surveys(helpers.get_user_id('csander')))[0][
'access_key']
rv = client.get(
flask.url_for('voting.edit_questions', access_key=access_key))
assert rv.status_code == 200
assert b'You are not the creator of this survey' in rv.data
assert b'Editing survey questions' not in rv.data
with client.session_transaction() as sess:
sess['username'] = 'csander'
client.post(
flask.url_for('voting.make_survey'),
data=dict(
title='Already closed',
description='',
start_date='2018-05-01',
start_hour='12',
start_minute='00',
start_period='P',
end_date='2018-05-03',
end_hour='12',
end_minute='00',
end_period='P',
group=''),
follow_redirects=False)
closed_access_key = helpers.get_closed_surveys(
helpers.get_user_id('csander'))[0]['access_key']
rv = client.get(
flask.url_for('voting.edit_questions', access_key=closed_access_key))
assert rv.status_code == 200
assert b'Cannot modify a survey after it has closed' in rv.data
assert b'Editing survey questions' not in rv.data
# Test success cases
rv = client.get(
flask.url_for('voting.edit_questions', access_key=access_key))
assert rv.status_code == 200
assert b'Editing survey questions' in rv.data
assert b'someResponses = true' in rv.data
access_key2 = list(
helpers.get_visible_surveys(helpers.get_user_id('csander')))[1][
'access_key']
rv = client.get(
flask.url_for('voting.edit_questions', access_key=access_key2))
assert rv.status_code == 200
assert b'Editing survey questions' in rv.data
assert b'someResponses = false' in rv.data
# Test saving questions
rv = client.post(
flask.url_for('voting.save_questions', access_key=closed_access_key),
data='[]')
assert rv.status_code == 200
assert b'Cannot modify a survey after it has closed' in rv.data
rv = client.post(
flask.url_for('voting.save_questions', access_key=access_key2),
data=
'[{"title":"Added question","description":"","type":1,"choices":["choice A","choice B"]}]'
)
assert rv.status_code == 200
assert json.loads(rv.data) == {'success': True}
rv = client.get(
flask.url_for('voting.edit_questions', access_key=access_key2))
assert rv.status_code == 200
assert b'Editing survey questions' in rv.data
assert b'Added question' in rv.data and b'choice A' in rv.data and b'choice B' in rv.data
def test_edit_params(client):
# Test that (some) restrictions are applied
rv = client.get(
flask.url_for('voting.edit_params', access_key='invalid-access-key'))
assert rv.status_code == 403
with client.session_transaction() as sess:
sess['username'] = 'csander'
rv = client.get(
flask.url_for('voting.edit_params', access_key='invalid-access-key'))
assert rv.status_code == 200
assert b'Invalid access key' in rv.data
assert b'Editing survey' not in rv.data
# Test successful case
access_key = list(
helpers.get_visible_surveys(helpers.get_user_id('csander')))[0][
'access_key']
rv = client.get(flask.url_for('voting.edit_params', access_key=access_key))
assert rv.status_code == 200
assert b'Editing survey' in rv.data
assert b"value='ABC'" in rv.data
assert b'New description' not in rv.data
yesterday = date.today() + timedelta(days=-1)
tomorrow = date.today() + timedelta(days=1)
rv = client.post(
flask.url_for('voting.save_params', access_key=access_key),
data=dict(
title='ABC',
description='New description',
start_date=yesterday.strftime(helpers.YYYY_MM_DD),
start_hour='12',
start_minute='00',
start_period='P',
end_date=tomorrow.strftime(helpers.YYYY_MM_DD),
end_hour='12',
end_minute='00',
end_period='P',
group=''),
follow_redirects=False)
assert rv.status_code == 302
assert rv.location == flask.url_for(
'voting.edit_questions', access_key=access_key)
rv = client.get(flask.url_for('voting.edit_params', access_key=access_key))
assert rv.status_code == 200
assert b'New description' in rv.data # assert that description has changed
# Error cases for saving params
with client.session_transaction() as sess:
sess['username'] = 'reng'
rv = client.post(
flask.url_for('voting.save_params', access_key=access_key),
follow_redirects=False)
assert rv.status_code == 200
assert b'You are not the creator of this survey' in rv.data
with client.session_transaction() as sess:
del sess['username']
rv = client.post(
flask.url_for('voting.save_params', access_key=access_key),
follow_redirects=False)
assert rv.status_code == 403
def test_close(client):
with client.session_transaction() as sess:
sess['username'] = 'csander'
def make_survey(start_date):
rv = client.post(
flask.url_for('voting.make_survey'),
data=dict(
title='ABC',
description='',
start_date=start_date.strftime(helpers.YYYY_MM_DD),
start_hour='12',
start_minute='00',
start_period='P',
end_date=(start_date + timedelta(
days=2)).strftime(helpers.YYYY_MM_DD),
end_hour='12',
end_minute='00',
end_period='P',
public='on',
group=''),
follow_redirects=False)
assert rv.status_code == 302
return re.search(r'[A-Za-z0-9]{64}', rv.location)[0]
past = make_survey(date.today() + timedelta(days=-3))
present = make_survey(date.today() + timedelta(days=-1))
future = make_survey(date.today() + timedelta(days=1))
# Test error cases
rv = client.get(flask.url_for('voting.close_survey', access_key=past))
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Cannot modify a survey after it has closed'
}
rv = client.get(flask.url_for('voting.close_survey', access_key=future))
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Survey has not opened yet'
}
# Test successful case
rv = client.get(flask.url_for('voting.close_survey', access_key=present))
assert rv.status_code == 200
assert json.loads(rv.data) == {'success': True}
rv = client.get(flask.url_for('voting.close_survey', access_key=present))
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Cannot modify a survey after it has closed'
}
def test_delete(client):
# Test error cases
rv = client.delete(
flask.url_for('voting.delete_survey', access_key='invalid-access-key'))
assert rv.status_code == 403
with client.session_transaction() as sess:
sess['username'] = 'reng'
rv = client.delete(
flask.url_for('voting.delete_survey', access_key='invalid-access-key'))
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid access key'
}
access_key = helpers.get_closed_surveys(
helpers.get_user_id('csander'))[0]['access_key']
rv = client.delete(
flask.url_for('voting.delete_survey', access_key=access_key))
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'You are not the creator of this survey'
}
with client.session_transaction() as sess:
sess['username'] = 'csander'
# Test successful case
rv = client.delete(
flask.url_for('voting.delete_survey', access_key=access_key))
assert rv.status_code == 200
assert json.loads(rv.data) == {'success': True}
# Test that it was deleted
rv = client.delete(
flask.url_for('voting.delete_survey', access_key=access_key))
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid access key'
}
def test_submit(client):
with client.session_transaction() as sess:
sess['username'] = 'csander'
yesterday = date.today() + timedelta(days=-1)
tomorrow = date.today() + timedelta(days=1)
rv = client.post(
flask.url_for('voting.make_survey'),
data=dict(
title='Response test',
description='',
start_date=yesterday.strftime(helpers.YYYY_MM_DD),
start_hour='12',
start_minute='00',
start_period='P',
end_date=tomorrow.strftime(helpers.YYYY_MM_DD),
end_hour='12',
end_minute='00',
end_period='P',
public='on',
group=''),
follow_redirects=False)
access_key = [
survey
for survey in helpers.get_visible_surveys(
helpers.get_user_id('csander'))
if survey['title'] == 'Response test'
][0]['access_key']
assert rv.status_code == 302
assert rv.location == flask.url_for(
'voting.edit_questions', access_key=access_key)
with client.session_transaction() as sess:
del sess['username']
survey_id = helpers.get_survey_data(access_key)['survey_id']
question_types = helpers.get_question_types()
helpers.set_questions(survey_id, [{ # question id 8
'title': 'Question A',
'description': '',
'type': question_types['Dropdown'],
'choices': ['1', '2', '3'] # choices 12, 13, 14
}, { # question id 9
'title': 'Question B',
'description': 'bbb',
'type': question_types['Short text']
}, { # question id 10
'title': 'Question C',
'description': 'ccc',
'type': question_types['Checkboxes'],
'choices': ['a', 'b', 'c'] # choices 15, 16, 17
}, { # question id 11
'title': 'Question D',
'description': '',
'type': question_types['Long text']
}, { # question id 12
'title': 'Question E',
'description': '',
'type': question_types['Elected position'],
'choices': ['do', 're', 'me'] # choices 18, 19, 20
}])
# Test (some) restriction
rv = client.post(
flask.url_for('voting.submit', access_key=access_key), data='')
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Must be logged in to take survey'
}
with client.session_transaction() as sess:
sess['username'] = 'csander'
# Test questions match
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data='{"responses":[{"question":1}]}')
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Survey questions have changed'
}
# Test response value errors
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data="""
{
"responses":[
{"question":8,"response":"4"},
{"question":9},
{"question":10},
{"question":11},
{"question":12}
]
}
""")
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid response to dropdown'
}
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data="""
{
"responses":[
{"question":8,"response":15},
{"question":9},
{"question":10},
{"question":11},
{"question":12}
]
}
""")
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid choice for dropdown'
}
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data="""
{
"responses":[
{"question":8,"response":13},
{"question":9,"response":10},
{"question":10},
{"question":11},
{"question":12}
]
}
""")
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid text response'
}
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data="""
{
"responses":[
{"question":8,"response":13},
{"question":9,"response":"shorty"},
{"question":10,"response":10},
{"question":11},
{"question":12}
]
}
""")
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid response to checkboxes'
}
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data="""
{
"responses":[
{"question":8,"response":13},
{"question":9,"response":"shorty"},
{"question":10,"response":["3"]},
{"question":11},
{"question":12}
]
}
""")
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid response to checkboxes'
}
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data="""
{
"responses":[
{"question":8,"response":13},
{"question":9,"response":"shorty"},
{"question":10,"response":[14]},
{"question":11},
{"question":12}
]
}
""")
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid choice for checkboxes'
}
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data="""
{
"responses":[
{"question":8,"response":13},
{"question":9,"response":"shorty"},
{"question":10,"response":[15,17]},
{"question":11,"response":100},
{"question":12}
]
}
""")
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid text response'
}
for position_response in ('NO', [True], [[1]]):
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data=json.dumps({
'responses': [{
'question': 8,
'response': 13
}, {
'question': 9,
'response': 'shorty'
}, {
'question': 10,
'response': [15, 17]
}, {
'question': 11,
'response': 'looooooooong'
}, {
'question': 12,
'response': position_response
}]
}))
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid response to elected position'
}
for position_choice in ({'choice_id': 2}, {'user_id': -100}):
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data=json.dumps({
'responses': [{
'question': 8,
'response': 13
}, {
'question': 9,
'response': 'shorty'
}, {
'question': 10,
'response': [15, 17]
}, {
'question': 11,
'response': 'looooooooong'
}, {
'question': 12,
'response': [[position_choice]]
}]
}))
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Invalid choice for elected position'
}
duplicate_responses = ([[{'choice_id': 19}, {'choice_id': 19}]], )
duplicate_responses += ([[None], [{'user_id': 3}], [None]], )
for position_response in duplicate_responses:
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data=json.dumps({
'responses': [{
'question': 8,
'response': 13
}, {
'question': 9,
'response': 'shorty'
}, {
'question': 10,
'response': [15, 17]
}, {
'question': 11,
'response': 'looooooooong'
}, {
'question': 12,
'response': position_response
}]
}))
assert rv.status_code == 200
assert json.loads(rv.data) == {
'success': False,
'message': 'Candidate ranked twice for elected position'
}
rv = client.post(
flask.url_for('voting.submit', access_key=access_key),
data="""
{
"responses":[
{"question":8,"response":13},
{"question":9,"response":"shorty"},
{"question":10,"response":[15,17]},
{"question":11,"response":"looooooooong"},
{"question":12,"response":[[{"user_id":3}],[{"choice_id":19},{"user_id":2}],[null]]}
]
}
""")
assert rv.status_code == 200
assert json.loads(rv.data) == {'success': True}
assert helpers.get_responses(
survey_id, helpers.get_user_id('csander')) == [{
'question_id': 8,
'title': 'Question A',
'description': None,
'type': 1,
'list_order': 0,
'choices': {
12: '1',
13: '2',
14: '3'
},
'responses': [13]
}, {
'question_id': 9,
'title': 'Question B',
'description': 'bbb',
'type': 4,
'list_order': 1,
'choices': 0,
'responses': ['shorty']
}, {
'question_id': 10,
'title': 'Question C',
'description': 'ccc',
'type': 2,
'list_order': 2,
'choices': {
15: 'a',
16: 'b',
17: 'c'
},
'responses': [[15, 17]]
}, {
'question_id':
11,
'title':
'Question D',
'description':
None,
'type':
5,
'list_order':
3,
'choices':
0,
'responses': ['looooooooong']
}, {
'question_id':
12,
'title':
'Question E',
'description':
None,
'type':
3,
'list_order':
4,
'choices': {
18: 'do',
19: 're',
20: 'me'
},
'responses': [[['Belac Sander'], ['re', 'Robert Eng'], ['NO']]]
}]
rv = client.get(flask.url_for('voting.take_survey', access_key=access_key))
assert rv.status_code == 302
assert rv.location == flask.url_for(
'voting.show_my_response', access_key=access_key)
def test_my_response(client):
access_key = [
survey
for survey in helpers.get_visible_surveys(
helpers.get_user_id('csander'))
if survey['title'] == 'Response test'
][0]['access_key']
rv = client.get(
flask.url_for('voting.show_my_response', access_key=access_key))
assert rv.status_code == 200
assert b'Must be logged in to see response' in rv.data
with client.session_transaction() as sess:
sess['username'] = 'csander'
rv = client.get(
flask.url_for('voting.show_my_response', access_key='not-a-real-key'))
assert rv.status_code == 200
assert b'Invalid access key' in rv.data
unresponded_access_key = [
survey
for survey in helpers.get_visible_surveys(
helpers.get_user_id('csander'))
if survey['title'] != 'Response test'
][0]['access_key']
rv = client.get(
flask.url_for(
'voting.show_my_response', access_key=unresponded_access_key))
assert rv.status_code == 200
assert b'You have not responded to this survey' in rv.data
rv = client.get(
flask.url_for('voting.show_my_response', access_key=access_key))
assert rv.status_code == 200
assert b'My responses for Response test' in rv.data
def test_results(client):
rv = client.get(
flask.url_for('voting.show_results', access_key='invalid-access-key'))
assert rv.status_code == 200
assert b'Invalid access key' in rv.data
access_key = [
survey
for survey in helpers.get_visible_surveys(
helpers.get_user_id('csander'))
if survey['title'] == 'Response test'
][0]['access_key']
with client.session_transaction() as sess:
sess['username'] = 'csander'
# Test viewing before close
rv = client.get(
flask.url_for('voting.show_results', access_key=access_key))
assert rv.status_code == 200
assert b'You are not permitted to see the results at this time' in rv.data
# Test releasing before close
rv = client.get(
flask.url_for('voting.release_results', access_key=access_key))
assert rv.status_code == 200
assert b'Survey has not yet finished' in rv.data
# Test after close
yesterday = date.today() - timedelta(days=1)
rv = client.post(
flask.url_for('voting.save_params', access_key=access_key),
data=dict(
title='Response test',
description='',
start_date=yesterday.strftime(helpers.YYYY_MM_DD),
start_hour='12',
start_minute='00',
start_period='P',
end_date=yesterday.strftime(helpers.YYYY_MM_DD),
end_hour='1',
end_minute='00',
end_period='P',
public='on',
group=''),
follow_redirects=False)
assert rv.status_code == 302
assert rv.location == flask.url_for(
'voting.edit_questions', access_key=access_key)
rv = client.get(
flask.url_for('voting.show_results', access_key=access_key))
assert rv.status_code == 200
assert b'You are not permitted to see the results at this time' not in rv.data
assert b'Responses:' in rv.data
assert b'Question A' in rv.data
assert b'Question B' in rv.data
assert b'Question C' in rv.data
assert b'Question D' in rv.data
assert b'Question E' in rv.data
assert b'Allow others to see results' in rv.data
# Test if not creator after close
with client.session_transaction() as sess:
del sess['username']
rv = client.get(
flask.url_for('voting.show_results', access_key=access_key))
assert rv.status_code == 200
assert b'You are not permitted to see the results at this time' in rv.data
# Test releasing error conditions
rv = client.get(
flask.url_for(
'voting.release_results', access_key='invalid-access-key'),
follow_redirects=False)
assert rv.status_code == 403
with client.session_transaction() as sess:
sess['username'] = 'reng'
rv = client.get(
flask.url_for(
'voting.release_results', access_key='invalid-access-key'),
follow_redirects=False)
assert rv.status_code == 200
assert b'Invalid access key' in rv.data
rv = client.get(
flask.url_for('voting.release_results', access_key=access_key),
follow_redirects=False)
assert rv.status_code == 200
assert b'You are not the creator of this survey' in rv.data
# Test successful release
with client.session_transaction() as sess:
sess['username'] = 'csander'
rv = client.get(
flask.url_for('voting.release_results', access_key=access_key),
follow_redirects=False)
assert rv.status_code == 302
assert rv.location == flask.url_for(
'voting.show_results', access_key=access_key)
# Test if not creator, but results released
with client.session_transaction() as sess:
del sess['username']
rv = client.get(
flask.url_for('voting.show_results', access_key=access_key))
assert rv.status_code == 200
assert b'You are not permitted to see the results at this time' not in rv.data
assert b'Responses:' in rv.data
assert b'Question A' in rv.data
assert b'Question B' in rv.data
assert b'Question C' in rv.data
assert b'Question D' in rv.data
assert b'Question E' in rv.data
assert b'Allow others to see results' not in rv.data
| 34.410596
| 535
| 0.55006
| 5,391
| 46,764
| 4.601373
| 0.059544
| 0.056599
| 0.035032
| 0.05414
| 0.82093
| 0.790051
| 0.770177
| 0.751189
| 0.725994
| 0.683262
| 0
| 0.026242
| 0.305727
| 46,764
| 1,358
| 536
| 34.435935
| 0.737795
| 0.020422
| 0
| 0.679811
| 0
| 0.002366
| 0.254043
| 0.047946
| 0
| 0
| 0
| 0
| 0.162461
| 1
| 0.021293
| false
| 0
| 0.007098
| 0
| 0.02918
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
688cc9797d54552b61af4197f1cbf7caed2fdb2f
| 9,217
|
py
|
Python
|
tests/test_lfw_format.py
|
IRDonch/datumaro
|
d029e67549b7359c887bd15039997bd8bbae7c0c
|
[
"MIT"
] | null | null | null |
tests/test_lfw_format.py
|
IRDonch/datumaro
|
d029e67549b7359c887bd15039997bd8bbae7c0c
|
[
"MIT"
] | null | null | null |
tests/test_lfw_format.py
|
IRDonch/datumaro
|
d029e67549b7359c887bd15039997bd8bbae7c0c
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import os.path as osp
import numpy as np
from datumaro.components.annotation import Label, Points
from datumaro.components.dataset import Dataset
from datumaro.components.extractor import DatasetItem
from datumaro.plugins.lfw_format import LfwConverter, LfwImporter
from datumaro.util.image import Image
from datumaro.util.test_utils import TestDir, compare_datasets
from .requirements import Requirements, mark_requirement
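# The converter tests below round-trip a dataset: build it in memory, convert
# it with LfwConverter into a temp dir, re-import via the 'lfw' plugin, and
# compare; the importer tests at the bottom load a dummy asset directory.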
class LfwFormatTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='name0_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/name0_0002']
})]
),
DatasetItem(id='name0_0002', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/name0_0001'],
'negative_pairs': ['name1/name1_0001']
})]
),
DatasetItem(id='name1_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(1, attributes={
'positive_pairs': ['name1/name1_0002']
})]
),
DatasetItem(id='name1_0002', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(1, attributes={
'positive_pairs': ['name1/name1_0002'],
'negative_pairs': ['name0/name0_0001']
})]
),
], categories=['name0', 'name1'])
with TestDir() as test_dir:
LfwConverter.convert(source_dataset, test_dir,
save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, source_dataset, parsed_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_no_save_images(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='name0_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/name0_0002']
})]
),
DatasetItem(id='name0_0002', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/name0_0001'],
'negative_pairs': ['name1/name1_0001']
})]
),
DatasetItem(id='name1_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[Label(1, attributes={})]
),
], categories=['name0', 'name1'])
with TestDir() as test_dir:
LfwConverter.convert(source_dataset, test_dir,
save_images=False)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_landmarks(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='name0_0001',
subset='test', image=np.ones((2, 5, 3)),
annotations=[
Label(0, attributes={
'positive_pairs': ['name0/name0_0002']
}),
Points([0, 4, 3, 3, 2, 2, 1, 0, 3, 0]),
]
),
DatasetItem(id='name0_0002',
subset='test', image=np.ones((2, 5, 3)),
annotations=[
Label(0),
Points([0, 5, 3, 5, 2, 2, 1, 0, 3, 0]),
]
),
], categories=['name0'])
with TestDir() as test_dir:
LfwConverter.convert(source_dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_no_subsets(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='name0_0001',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/name0_0002']
})],
),
DatasetItem(id='name0_0002',
image=np.ones((2, 5, 3)),
annotations=[Label(0)]
),
], categories=['name0'])
with TestDir() as test_dir:
LfwConverter.convert(source_dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_no_format_names(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='a/1',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'positive_pairs': ['name0/b/2'],
'negative_pairs': ['d/4']
})],
),
DatasetItem(id='b/2',
image=np.ones((2, 5, 3)),
annotations=[Label(0)]
),
DatasetItem(id='c/3',
image=np.ones((2, 5, 3)),
annotations=[Label(1)]
),
DatasetItem(id='d/4',
image=np.ones((2, 5, 3)),
),
], categories=['name0', 'name1'])
with TestDir() as test_dir:
LfwConverter.convert(source_dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, source_dataset, parsed_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
dataset = Dataset.from_iterable([
DatasetItem(id='кириллица с пробелом',
image=np.ones((2, 5, 3))
),
DatasetItem(id='name0_0002',
image=np.ones((2, 5, 3)),
annotations=[Label(0, attributes={
'negative_pairs': ['кириллица с пробелом']
})]
),
], categories=['name0'])
with TestDir() as test_dir:
LfwConverter.convert(dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, dataset, parsed_dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_image_with_arbitrary_extension(self):
dataset = Dataset.from_iterable([
DatasetItem(id='a/1', image=Image(
path='a/1.JPEG', data=np.zeros((4, 3, 3))),
),
DatasetItem(id='b/c/d/2', image=Image(
path='b/c/d/2.bmp', data=np.zeros((3, 4, 3))),
),
], categories=[])
with TestDir() as test_dir:
LfwConverter.convert(dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'lfw')
compare_datasets(self, dataset, parsed_dataset, require_images=True)
DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'lfw_dataset')
class LfwImporterTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect(self):
self.assertTrue(LfwImporter.detect(DUMMY_DATASET_DIR))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='name0_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[
Label(0, attributes={
'negative_pairs': ['name1/name1_0001',
'name1/name1_0002']
}),
Points([0, 4, 3, 3, 2, 2, 1, 0, 3, 0]),
]
),
DatasetItem(id='name1_0001', subset='test',
image=np.ones((2, 5, 3)),
annotations=[
Label(1, attributes={
'positive_pairs': ['name1/name1_0002'],
}),
Points([1, 6, 4, 6, 3, 3, 2, 1, 4, 1]),
]
),
DatasetItem(id='name1_0002', subset='test',
image=np.ones((2, 5, 3)),
annotations=[
Label(1),
Points([0, 5, 3, 5, 2, 2, 1, 0, 3, 0]),
]
),
], categories=['name0', 'name1'])
dataset = Dataset.import_from(DUMMY_DATASET_DIR, 'lfw')
compare_datasets(self, expected_dataset, dataset)
| 38.404167
| 80
| 0.5364
| 964
| 9,217
| 4.893154
| 0.107884
| 0.060632
| 0.04664
| 0.05088
| 0.795633
| 0.787365
| 0.781429
| 0.771465
| 0.769345
| 0.723553
| 0
| 0.051673
| 0.338613
| 9,217
| 239
| 81
| 38.564854
| 0.722113
| 0
| 0
| 0.717703
| 0
| 0
| 0.090593
| 0
| 0
| 0
| 0
| 0
| 0.004785
| 1
| 0.043062
| false
| 0
| 0.100478
| 0
| 0.15311
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d7a5e5b05587f4827d617bcfc980e6852b32b7e8
| 4,606
|
py
|
Python
|
src/main.py
|
MrSpadala/2PL-tester
|
8911660ad874a485d62cd3ae7cbeb3f7bf1bf96a
|
[
"MIT"
] | null | null | null |
src/main.py
|
MrSpadala/2PL-tester
|
8911660ad874a485d62cd3ae7cbeb3f7bf1bf96a
|
[
"MIT"
] | null | null | null |
src/main.py
|
MrSpadala/2PL-tester
|
8911660ad874a485d62cd3ae7cbeb3f7bf1bf96a
|
[
"MIT"
] | 1
|
2022-02-07T11:39:08.000Z
|
2022-02-07T11:39:08.000Z
|
from flask import Flask
from flask import request
from check2PL import solve2PL
from checkConflict import solveConflict
from checkTimestamps import solveTimestamps
from utils import parse_schedule
app = Flask(__name__)
with open('../static/index.html', 'r') as f:
index_cached = f.read()
@app.route("/2PL", methods=['GET', 'POST'])
def index():
# get and check args
schedule = request.args.get('schedule')
use_xl_only = request.args.get('use_xl_only')
response = index_cached
if schedule is None:
return response
schedule = schedule.replace(' ', '')
if schedule == '':
return format_response('Empty schedule', response)
sched_parsed = parse_schedule(schedule)
print(sched_parsed)
if isinstance(sched_parsed, str):  # parsing error message
return format_response('Parsing error: '+sched_parsed, response)
# Solve
res2PL = solve2PL(sched_parsed, use_xl_only)
resConfl = solveConflict(sched_parsed)
resTS = solveTimestamps(sched_parsed)
# Format results for conflict serializability
msg = '<b><i>Conflict serializability</i></b><br>'
msg += 'Is the schedule conflict serializable: <i>'+str(resConfl)+'</i>'
response = format_response(msg, response)
# Format results for 2PL
msg = '<b><i>Two phase lock protocol</i></b><br>'
if res2PL['sol'] is None:
#return format_response('<br>'+res2PL['err']+'<br><br>'+res2PL['partial_locks'])
msg += res2PL['err']
response = format_response(msg, response)
else:
msg += """
Solution: {}, <br>
Is the schedule strict-2PL: <i>{}</i>, <br>
Is the schedule strong strict-2PL: <i>{}</i>
""".format(res2PL['sol'], res2PL['strict'], res2PL['strong'])
response = format_response(msg, response)
# Format results for timestamps
msg = '<b><i>Timestamps (DRAFT)</i></b><br>'
if resTS['err'] is None:
msg += 'List of executed operations: '+str(resTS['sol'])+'<br>'
msg += 'List of waiting transactions at the end of schedule: '+str(resTS['waiting_tx'])+'<br>'
response = format_response(msg, response)
else:
msg += resTS['err']+'<br>'
response = format_response(msg, response)
return response
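# /solve mirrors the /2PL handler but reads the schedule from POST form data
# and reports input errors with an explicit 400 status code.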
@app.route("/solve", methods=['POST'])
def solve():
# get and check args
schedule = request.form.get('schedule')
use_xl_only = request.form.get('use_xl_only')
with open('solve.html', 'r') as f:
response = f.read()
if schedule is None:
return response
schedule = schedule.replace(' ', '')
if schedule == '':
return format_response('Empty schedule', response), 400
sched_parsed = parse_schedule(schedule)
print(sched_parsed)
result_http_code = 200
if isinstance(sched_parsed, str):  # parsing error message
return format_response('Parsing error: ' + sched_parsed, response), 400
# Solve
res2PL = solve2PL(sched_parsed, use_xl_only)
resConfl = solveConflict(sched_parsed)
resTS = solveTimestamps(sched_parsed)
# Format results for conflict serializability
msg = '<b><i>Conflict serializability</i></b><br>'
msg += 'Is the schedule conflict serializable: <i>' + str(resConfl) + '</i>'
response = format_response(msg, response)
# Format results for 2PL
msg = '<b><i>Two phase lock protocol</i></b><br>'
if res2PL['sol'] is None:
# return format_response('<br>'+res2PL['err']+'<br><br>'+res2PL['partial_locks'])
msg += res2PL['err']
response = format_response(msg, response)
else:
msg += """
Solution: {} <br>
Is the schedule strict-2PL: <i>{}</i> <br>
Is the schedule strong strict-2PL: <i>{}</i>
""".format(res2PL['sol'], res2PL['strict'], res2PL['strong'])
response = format_response(msg, response)
# Format results for timestamps
msg = '<b><i>Timestamps (DRAFT)</i></b><br>'
if resTS['err'] is None:
msg += 'List of executed operations: ' + str(resTS['sol']) + '<br>'
msg += 'List of waiting transactions at the end of schedule: ' + str(resTS['waiting_tx']) + '<br>'
response = format_response(msg, response)
else:
msg += resTS['err'] + '<br>'
response = format_response(msg, response)
return response, result_http_code
def format_response(msg, res):
return res.replace('<!---->', '<br>'+msg+'<br><!---->')
if __name__ == "__main__":
from os.path import isfile
debug = isfile('.DEBUG')
host = "localhost" if debug else "0.0.0.0"
app.run(host=host, port=5000, debug=debug)
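# Minimal usage sketch (hypothetical schedule syntax; the exact operation
# grammar is defined by parse_schedule in utils):
#
#   curl "http://localhost:5000/2PL?schedule=r1(x)w2(x)w1(y)"
#
# The result is the cached index page with the conflict-serializability, 2PL,
# and timestamp verdicts injected at the <!----> placeholder.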
| 31.547945
| 106
| 0.627008
| 570
| 4,606
| 4.947368
| 0.184211
| 0.084397
| 0.066312
| 0.088652
| 0.815603
| 0.801418
| 0.760993
| 0.760993
| 0.72695
| 0.72695
| 0
| 0.012232
| 0.219062
| 4,606
| 145
| 107
| 31.765517
| 0.771754
| 0.096613
| 0
| 0.635417
| 0
| 0
| 0.260796
| 0.013028
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.072917
| 0.010417
| 0.197917
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d7b24c8ca7adbb62fe2013b2416aa2a35ca6ffbb
| 48
|
py
|
Python
|
src/opendatablend/__init__.py
|
sscaress/opendatablend-py
|
976b514cfa0b9c7d41f4edc1a1c118c8c5a4fd6d
|
[
"MIT"
] | null | null | null |
src/opendatablend/__init__.py
|
sscaress/opendatablend-py
|
976b514cfa0b9c7d41f4edc1a1c118c8c5a4fd6d
|
[
"MIT"
] | null | null | null |
src/opendatablend/__init__.py
|
sscaress/opendatablend-py
|
976b514cfa0b9c7d41f4edc1a1c118c8c5a4fd6d
|
[
"MIT"
] | null | null | null |
from opendatablend.opendatablend import get_data
| 48
| 48
| 0.916667
| 6
| 48
| 7.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 48
| 1
| 48
| 48
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0bf6e96cb5f32b5632ec9ac01e9f0991a95a8aa5
| 1,193
|
py
|
Python
|
py_modular/utils/processing.py
|
fredyeah/streamer
|
079b8cb5d6002df50f13aa184ece7a4a3b357da4
|
[
"MIT"
] | 5
|
2021-05-20T00:59:41.000Z
|
2021-12-05T18:25:57.000Z
|
py_modular/utils/processing.py
|
fredyeah/streamer
|
079b8cb5d6002df50f13aa184ece7a4a3b357da4
|
[
"MIT"
] | null | null | null |
py_modular/utils/processing.py
|
fredyeah/streamer
|
079b8cb5d6002df50f13aa184ece7a4a3b357da4
|
[
"MIT"
] | 1
|
2021-05-30T12:43:02.000Z
|
2021-05-30T12:43:02.000Z
|
from math import asin, sin, pi
def window_grains_sin(grains=[]):
"""Util used to window an array of audio with half a sine wave
:param grains: An array of audio buffers that contain audio data to be windowed
:type grains: array(array)
:returns: An array of the same shape as the input, but windowed
:rtype: array(array)
"""
for index, grain in enumerate(grains):
print('windowed ' + str(index) + ' grains')
grain_length = len(grain)
for i in range(grain_length):
grain[i] = grain[i] * sin(i * pi / grain_length)
return grains
def window_grains_tri(grains=[]):
"""Util used to window an array of audio with half a triangle wave
:param grains: An array of audio buffers that contain audio data to be windowed
:type grains: array(array)
:returns: An array of the same shape as the input, but windowed
:rtype: array(array)
"""
for index, grain in enumerate(grains):
print('windowed ' + str(index) + ' grains')
grain_length = len(grain)
for i in range(grain_length):
grain[i] = grain[i] * (asin(sin(i * pi / grain_length)) / (0.5 * pi))
return grains
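# Minimal usage sketch (illustrative buffers only; real grains would hold
# audio samples):
#
#   grains = [[1.0] * 8, [0.5] * 8]
#   windowed = window_grains_sin(grains)  # each buffer now fades in and out
#
# Note that both utils mutate the input buffers in place and return the list.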
| 37.28125
| 83
| 0.642079
| 177
| 1,193
| 4.271186
| 0.305085
| 0.055556
| 0.071429
| 0.074074
| 0.857143
| 0.812169
| 0.812169
| 0.812169
| 0.812169
| 0.812169
| 0
| 0.002252
| 0.255658
| 1,193
| 31
| 84
| 38.483871
| 0.849099
| 0.426655
| 0
| 0.666667
| 0
| 0
| 0.050713
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.333333
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
042977f0af674cf3ca4f28fd9e848d23764fa207
| 67
|
py
|
Python
|
pyece/core/property/__init__.py
|
rilshok/pyece
|
eaa78f175a922b99fd0bf5157ba129bf495203e3
|
[
"MIT"
] | null | null | null |
pyece/core/property/__init__.py
|
rilshok/pyece
|
eaa78f175a922b99fd0bf5157ba129bf495203e3
|
[
"MIT"
] | null | null | null |
pyece/core/property/__init__.py
|
rilshok/pyece
|
eaa78f175a922b99fd0bf5157ba129bf495203e3
|
[
"MIT"
] | null | null | null |
from .base import *
from .point import *
from .pointcloud import *
| 16.75
| 25
| 0.731343
| 9
| 67
| 5.444444
| 0.555556
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179104
| 67
| 3
| 26
| 22.333333
| 0.890909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
045a16856d919e5772728374d499ad5d309eb0c0
| 285
|
py
|
Python
|
src/errors.py
|
hyper-neutrino/bots-reforged
|
cbb4d34f2e40d460301077c8d58d3619e71f4406
|
[
"MIT"
] | null | null | null |
src/errors.py
|
hyper-neutrino/bots-reforged
|
cbb4d34f2e40d460301077c8d58d3619e71f4406
|
[
"MIT"
] | null | null | null |
src/errors.py
|
hyper-neutrino/bots-reforged
|
cbb4d34f2e40d460301077c8d58d3619e71f4406
|
[
"MIT"
] | null | null | null |
class BotError(RuntimeError):
def __init__(self, message="An unexpected error occurred with the bot!"):
self.message = message
class DataError(RuntimeError):
def __init__(self, message="An unexpected error occurred when accessing/saving data!"):
self.message = message
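# Hypothetical usage sketch (not part of the original module):
#
#   try:
#       raise DataError()
#   except DataError as e:
#       print(e.message)  # "An unexpected error occurred when accessing/saving data!"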
| 40.714286
| 91
| 0.750877
| 35
| 285
| 5.885714
| 0.542857
| 0.213592
| 0.184466
| 0.223301
| 0.533981
| 0.533981
| 0.533981
| 0.533981
| 0.533981
| 0
| 0
| 0
| 0.154386
| 285
| 7
| 92
| 40.714286
| 0.854772
| 0
| 0
| 0.333333
| 0
| 0
| 0.342657
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
f0bb6a461d2617c7c17a08e8e18ef1f4c1de29f2
| 18,422
|
py
|
Python
|
main/ensemble.py
|
ejklektov/dcase20-task3-seld
|
dd32cb5b23d48f10526f89e4ef1baf09947d6cc5
|
[
"MIT"
] | null | null | null |
main/ensemble.py
|
ejklektov/dcase20-task3-seld
|
dd32cb5b23d48f10526f89e4ef1baf09947d6cc5
|
[
"MIT"
] | null | null | null |
main/ensemble.py
|
ejklektov/dcase20-task3-seld
|
dd32cb5b23d48f10526f89e4ef1baf09947d6cc5
|
[
"MIT"
] | null | null | null |
import argparse
import os
import pdb
import shutil
from timeit import default_timer as timer
import numpy as np
import pandas as pd
from tqdm import tqdm
from evaluation import write_submission
def iters_ensemble(args):
'''
Ensemble over different iterations and write the ensembled files into the fusioned folder
'''
## directories
if args.task_type == 'sed_only':
# iterations ensemble directory
fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_mask_fusioned')
os.makedirs(fusioned_dir, exist_ok=True)
fusion_fn = '_fusion_sed_epoch_{}'
iterator = range(38, 42, 2)
elif args.task_type == 'two_staged_eval':
# iterations ensemble directory
fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'doa_fusioned')
os.makedirs(fusioned_dir, exist_ok=True)
fusion_fn = '_fusion_doa_epoch_{}'
iterator = range(78, 82, 2)
## average ensemble
print('\n===> Average ensemble')
ensemble_start_time = timer()
predicts_fusioned = []
for epoch_num in iterator:
fusion_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), fusion_fn.format(epoch_num))
for fn in sorted(os.listdir(fusion_dir)):
if fn.endswith('.csv') and not fn.startswith('.'):
fn_path = os.path.join(fusion_dir, fn)
predicts_fusioned.append(pd.read_csv(fn_path, header=0, index_col=0).values)
if len(predicts_fusioned) > file_num:
for n in range(file_num):
min_len = min(predicts_fusioned[n].shape[0], predicts_fusioned[n+file_num].shape[0])
predicts_fusioned[n] = (predicts_fusioned[n][:min_len,:] + predicts_fusioned[n+file_num][:min_len,:]) / 2
predicts_fusioned = predicts_fusioned[:file_num]
print('\nAverage ensemble time: {:.3f} s.'.format(timer()-ensemble_start_time))
## write the fusioned sed probabilities or doa predictions to fusioned files
print('\n===> Write the fusioned sed probabilities or doa predictions to fusioned files')
# this directory is only used to supply the filenames (fn)
iterator = tqdm(sorted(os.listdir(fusion_dir)), total=len(os.listdir(fusion_dir)), unit='iters')
n = 0
for fn in iterator:
if fn.endswith('.csv') and not fn.startswith('.'):
# write to the fusioned folder
fn_path = os.path.join(fusioned_dir, fn)
df_output = pd.DataFrame(predicts_fusioned[n])
df_output.to_csv(fn_path)
n += 1
iterator.close()
print('\n' + fusioned_dir)
print('\n===> Iterations ensemble finished!')
def threshold_iters_ensemble(args):
'''
Threshold the ensembled iteration outputs and write them to the submissions directory
'''
# directories
sed_mask_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_mask_fusioned')
doa_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'doa_fusioned')
if args.task_type == 'sed_only':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_test_fusioned')
elif args.task_type == 'two_staged_eval':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'all_test_fusioned')
os.makedirs(test_fusioned_dir, exist_ok=True)
if args.task_type == 'sed_only':
iterator = tqdm(sorted(os.listdir(sed_mask_fusioned_dir)), total=len(os.listdir(sed_mask_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_prob.csv') and not fn.startswith('.'):
fn_path = os.path.join(sed_mask_fusioned_dir, fn)
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to sed_test_fusioned
fn_noextension = fn.split('_prob')[0]
output_doas = np.zeros((prob_fusioned.shape[0],22))
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': output_doas
}
write_submission(submit_dict, test_fusioned_dir)
if args.task_type == 'two_staged_eval':
iterator = tqdm(sorted(os.listdir(doa_fusioned_dir)), total=len(os.listdir(doa_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_doa.csv') and not fn.startswith('.'):
fn_noextension = fn.split('_doa')[0]
# read sed predictions from sed_mask_fusioned directory
fn_path = os.path.join(sed_mask_fusioned_dir, fn_noextension + '_prob.csv')
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# read doa predictions from doa_fusioned directory
fn_path = os.path.join(doa_fusioned_dir, fn)
doa_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to all_test_fusioned
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': doa_fusioned
}
write_submission(submit_dict, test_fusioned_dir)
iterator.close()
print('\n' + test_fusioned_dir)
print('\n===> Threshold iterations ensemble finished!')
def models_ensemble(args):
'''
Ensemble over different models and write the ensembled files into the fusioned folder
'''
# directories
if args.task_type == 'sed_only':
fusion_folder = 'sed_mask_fusioned'
fusioned_folder = 'sed_mask_models_fusioned'
elif args.task_type == 'two_staged_eval':
fusion_folder = 'doa_fusioned'
fusioned_folder = 'doa_models_fusioned'
print('\n===> Model average ensemble')
ensemble_start_time = timer()
predicts_fusioned = []
for model_folder in sorted(os.listdir(submissions_dir)):
if not model_folder.startswith('.') and model_folder != 'models_ensemble':
print('\n' + model_folder)
fusion_dir = os.path.join(submissions_dir, model_folder, fusion_folder)
for fn in sorted(os.listdir(fusion_dir)):
if fn.endswith('.csv') and not fn.startswith('.'):
fn_path = os.path.join(fusion_dir, fn)
predicts_fusioned.append(pd.read_csv(fn_path, header=0, index_col=0).values)
if len(predicts_fusioned) > file_num:
for n in range(file_num):
min_len = min(predicts_fusioned[n].shape[0], predicts_fusioned[n+file_num].shape[0])
predicts_fusioned[n] = (predicts_fusioned[n][:min_len,:] + predicts_fusioned[n+file_num][:min_len,:]) / 2
predicts_fusioned = predicts_fusioned[:file_num]
print('\nAverage ensemble time: {:.3f} s.'.format(timer()-ensemble_start_time))
## write the fusioned sed probabilities or doa predictions to fusioned files
print('\n===> Write the fusioned sed probabilities or doa predictions to fusioned files')
# this directory is only used to supply the filenames (fn)
iterator = tqdm(sorted(os.listdir(fusion_dir)), total=len(os.listdir(fusion_dir)), unit='iters')
models_ensemble_dir = os.path.join(submissions_dir, 'models_ensemble', fusioned_folder)
os.makedirs(models_ensemble_dir, exist_ok=True)
n = 0
for fn in iterator:
if fn.endswith('.csv') and not fn.startswith('.'):
# write to the fusioned folder
fn_path = os.path.join(models_ensemble_dir, fn)
df_output = pd.DataFrame(predicts_fusioned[n])
df_output.to_csv(fn_path)
n += 1
iterator.close()
print('\n' + models_ensemble_dir)
print('\n===> Models ensemble finished!')
def threshold_models_ensemble(args):
'''
Threshold the ensembled model outputs and write them to the submissions directory
'''
# directories
sed_mask_fusioned_dir = os.path.join(submissions_dir, 'models_ensemble', 'sed_mask_models_fusioned')
doa_fusioned_dir = os.path.join(submissions_dir, 'models_ensemble', 'doa_models_fusioned')
if args.task_type == 'sed_only':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'sed_test_fusioned')
elif args.task_type == 'two_staged_eval':
test_fusioned_dir = os.path.join(submissions_dir, args.name + '_' + args.model_sed + '_{}'.format(args.audio_type) + '_{}'.format(args.feature_type) +
'_aug_{}'.format(args.data_aug) + '_seed_{}'.format(args.seed), 'all_test_fusioned')
os.makedirs(test_fusioned_dir, exist_ok=True)
if args.task_type == 'sed_only':
iterator = tqdm(sorted(os.listdir(sed_mask_fusioned_dir)), total=len(os.listdir(sed_mask_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_prob.csv') and not fn.startswith('.'):
fn_path = os.path.join(sed_mask_fusioned_dir, fn)
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to sed_test_fusioned
fn_noextension = fn.split('_prob')[0]
output_doas = np.zeros((prob_fusioned.shape[0],22))
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': output_doas
}
write_submission(submit_dict, test_fusioned_dir)
if args.task_type == 'two_staged_eval':
iterator = tqdm(sorted(os.listdir(doa_fusioned_dir)), total=len(os.listdir(doa_fusioned_dir)), unit='iters')
for fn in iterator:
if fn.endswith('_doa.csv') and not fn.startswith('.'):
fn_noextension = fn.split('_doa')[0]
# read sed predictions from sed_mask_fusioned directory
fn_path = os.path.join(sed_mask_fusioned_dir, fn_noextension + '_prob.csv')
prob_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# read doa predictions from doa_fusioned directory
fn_path = os.path.join(doa_fusioned_dir, fn)
doa_fusioned = pd.read_csv(fn_path, header=0, index_col=0).values
# write to all_test_fusioned
submit_dict = {
'filename': fn_noextension,
'events': (prob_fusioned>args.threshold).astype(np.float32),
'doas': doa_fusioned
}
write_submission(submit_dict, test_fusioned_dir)
iterator.close()
print('\n' + test_fusioned_dir)
print('\n===> Threshold models ensemble finished!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Ensemble on different iterations or different models')
subparsers = parser.add_subparsers(dest='mode')
parser_iters_ensemble = subparsers.add_parser('iters_ensemble')
parser_iters_ensemble.add_argument('--workspace', type=str, required=True,
help='workspace directory')
parser_iters_ensemble.add_argument('--feature_type', type=str, required=True,
choices=['logmel', 'logmelgcc', 'logmelintensity', 'logmelgccintensity'])
parser_iters_ensemble.add_argument('--audio_type', type=str, required=True,
choices=['foa', 'mic', 'foa&mic'], help='audio type')
parser_iters_ensemble.add_argument('--task_type', type=str, required=True,
choices=['sed_only', 'doa_only', 'two_staged_eval', 'seld'])
parser_iters_ensemble.add_argument('--model_sed', type=str, default='CRNN10')
parser_iters_ensemble.add_argument('--model_doa', type=str, default='pretrained_CRNN10')
parser_iters_ensemble.add_argument('--data_aug', default='None', type=str,
help='data augmentation methods')
parser_iters_ensemble.add_argument('--seed', default=42, type=int, help='random seed')
parser_iters_ensemble.add_argument('--name', default='n0', type=str)
parser_threshold_iters_ensemble = subparsers.add_parser('threshold_iters_ensemble')
parser_threshold_iters_ensemble.add_argument('--workspace', type=str, required=True,
help='workspace directory')
parser_threshold_iters_ensemble.add_argument('--feature_type', type=str, required=True,
choices=['logmel', 'logmelgcc', 'logmelintensity', 'logmelgccintensity'])
parser_threshold_iters_ensemble.add_argument('--audio_type', type=str, required=True,
choices=['foa', 'mic', 'foa&mic'], help='audio type')
parser_threshold_iters_ensemble.add_argument('--task_type', type=str, required=True,
choices=['sed_only', 'doa_only', 'two_staged_eval', 'seld'])
parser_threshold_iters_ensemble.add_argument('--model_sed', type=str, default='CRNN10')
parser_threshold_iters_ensemble.add_argument('--model_doa', type=str, default='pretrained_CRNN10')
parser_threshold_iters_ensemble.add_argument('--data_aug', default='None', type=str,
help='data augmentation methods')
parser_threshold_iters_ensemble.add_argument('--seed', default=42, type=int, help='random seed')
parser_threshold_iters_ensemble.add_argument('--name', default='n0', type=str)
parser_threshold_iters_ensemble.add_argument('--threshold', default=0.3, type=float)
parser_models_ensemble = subparsers.add_parser('models_ensemble')
parser_models_ensemble.add_argument('--workspace', type=str, required=True,
help='workspace directory')
parser_models_ensemble.add_argument('--feature_type', type=str, required=True,
choices=['logmel', 'logmelgcc', 'logmelintensity', 'logmelgccintensity'])
parser_models_ensemble.add_argument('--audio_type', type=str, required=True,
choices=['foa', 'mic', 'foa&mic'], help='audio type')
parser_models_ensemble.add_argument('--task_type', type=str, required=True,
choices=['sed_only', 'doa_only', 'two_staged_eval', 'seld'])
parser_models_ensemble.add_argument('--model_sed', type=str, default='CRNN10')
parser_models_ensemble.add_argument('--model_doa', type=str, default='pretrained_CRNN10')
parser_models_ensemble.add_argument('--data_aug', default='None', type=str,
help='data augmentation methods')
parser_models_ensemble.add_argument('--seed', default=42, type=int, help='random seed')
parser_models_ensemble.add_argument('--name', default='n0', type=str)
parser_threshold_models_ensemble = subparsers.add_parser('threshold_models_ensemble')
parser_threshold_models_ensemble.add_argument('--workspace', type=str, required=True,
help='workspace directory')
parser_threshold_models_ensemble.add_argument('--feature_type', type=str, required=True,
choices=['logmel', 'logmelgcc', 'logmelintensity', 'logmelgccintensity'])
parser_threshold_models_ensemble.add_argument('--audio_type', type=str, required=True,
choices=['foa', 'mic', 'foa&mic'], help='audio type')
parser_threshold_models_ensemble.add_argument('--task_type', type=str, required=True,
choices=['sed_only', 'doa_only', 'two_staged_eval', 'seld'])
parser_threshold_models_ensemble.add_argument('--model_sed', type=str, default='CRNN10')
parser_threshold_models_ensemble.add_argument('--model_doa', type=str, default='pretrained_CRNN10')
parser_threshold_models_ensemble.add_argument('--data_aug', default='None', type=str,
help='data augmentation methods')
parser_threshold_models_ensemble.add_argument('--seed', default=42, type=int, help='random seed')
parser_threshold_models_ensemble.add_argument('--name', default='n0', type=str)
parser_threshold_models_ensemble.add_argument('--threshold', default=0.3, type=float)
args = parser.parse_args()
# submissions directory
global submissions_dir
submissions_dir = os.path.join(args.workspace, 'appendixes', 'submissions_eval')
global file_num
file_num = 100
# ensemble different iterations or models
if args.mode == 'iters_ensemble':
iters_ensemble(args)
elif args.mode == 'threshold_iters_ensemble':
threshold_iters_ensemble(args)
elif args.mode == 'models_ensemble':
models_ensemble(args)
elif args.mode == 'threshold_models_ensemble':
threshold_models_ensemble(args)
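# Example invocation (hypothetical workspace path; flags as declared above):
#
#   python ensemble.py iters_ensemble --workspace /path/to/workspace \
#       --feature_type logmel --audio_type foa --task_type sed_only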
| 52.634286
| 158
| 0.635924
| 2,209
| 18,422
| 4.992757
| 0.076053
| 0.049506
| 0.065464
| 0.041346
| 0.879681
| 0.861638
| 0.84985
| 0.843685
| 0.836613
| 0.812585
| 0
| 0.006491
| 0.239008
| 18,422
| 350
| 159
| 52.634286
| 0.780227
| 0.059114
| 0
| 0.579365
| 0
| 0
| 0.157513
| 0.008467
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.035714
| 0
| 0.051587
| 0.059524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9bcefccb6280a6ff7ccc5701be21245777cc634c
| 19,469
|
py
|
Python
|
owlery_client/api/sparql_api.py
|
rpgoldman/owlery-client
|
bd02ee7c071b720604870d76da42a7b1e988332b
|
[
"Apache-2.0"
] | null | null | null |
owlery_client/api/sparql_api.py
|
rpgoldman/owlery-client
|
bd02ee7c071b720604870d76da42a7b1e988332b
|
[
"Apache-2.0"
] | null | null | null |
owlery_client/api/sparql_api.py
|
rpgoldman/owlery-client
|
bd02ee7c071b720604870d76da42a7b1e988332b
|
[
"Apache-2.0"
] | null | null | null |
"""
Owlery API
Owlery provides a web API for an [OWL API](http://owlapi.sourceforge.net)-based reasoner containing a configurable set of ontologies (a \"knowledgebase\"). # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: balhoff@renci.org
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from owlery_client.api_client import ApiClient, Endpoint as _Endpoint
from owlery_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
class SPARQLApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __kbs_kb_expand_get(
self,
kb,
query,
**kwargs
):
"""Expand SPARQL query encoded in URL parameter # noqa: E501
Expand a SPARQL query, transforming Owlet-style embedded class expressions into `FILTER`s # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.kbs_kb_expand_get(kb, query, async_req=True)
>>> result = thread.get()
Args:
kb (str): label for a knowledgebase in this Owlery
query (str): SPARQL query
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['kb'] = \
kb
kwargs['query'] = \
query
return self.call_with_http_info(**kwargs)
self.kbs_kb_expand_get = _Endpoint(
settings={
'response_type': None,
'auth': [],
'endpoint_path': '/kbs/{kb}/expand',
'operation_id': 'kbs_kb_expand_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'kb',
'query',
],
'required': [
'kb',
'query',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'kb':
(str,),
'query':
(str,),
},
'attribute_map': {
'kb': 'kb',
'query': 'query',
},
'location_map': {
'kb': 'path',
'query': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/sparql-query'
],
'content_type': [],
},
api_client=api_client,
callable=__kbs_kb_expand_get
)
def __kbs_kb_expand_post(
self,
kb,
body,
**kwargs
):
"""Expand SPARQL query contained in request body # noqa: E501
Expand a SPARQL query, transforming Owlet-style embedded class expressions into `FILTER`s # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.kbs_kb_expand_post(kb, body, async_req=True)
>>> result = thread.get()
Args:
kb (str): label for a knowledgebase in this Owlery
body (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['kb'] = \
kb
kwargs['body'] = \
body
return self.call_with_http_info(**kwargs)
self.kbs_kb_expand_post = _Endpoint(
settings={
'response_type': None,
'auth': [],
'endpoint_path': '/kbs/{kb}/expand',
'operation_id': 'kbs_kb_expand_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'kb',
'body',
],
'required': [
'kb',
'body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'kb':
(str,),
'body':
(str,),
},
'attribute_map': {
'kb': 'kb',
},
'location_map': {
'kb': 'path',
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [
'application/sparql-query',
'application/x-www-form-urlencoded'
]
},
api_client=api_client,
callable=__kbs_kb_expand_post
)
def __kbs_kb_sparql_get(
self,
kb,
query,
**kwargs
):
"""Perform SPARQL query encoded in URL parameter # noqa: E501
Perform SPARQL query using Owlet-style embedded class expression. This is not a complete SPARQL endpoint. It is for using Owlery as a federated query endpoint for a single Owlet triple pattern. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.kbs_kb_sparql_get(kb, query, async_req=True)
>>> result = thread.get()
Args:
kb (str): label for a knowledgebase in this Owlery
query (str): SPARQL query
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['kb'] = \
kb
kwargs['query'] = \
query
return self.call_with_http_info(**kwargs)
self.kbs_kb_sparql_get = _Endpoint(
settings={
'response_type': None,
'auth': [],
'endpoint_path': '/kbs/{kb}/sparql',
'operation_id': 'kbs_kb_sparql_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'kb',
'query',
],
'required': [
'kb',
'query',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'kb':
(str,),
'query':
(str,),
},
'attribute_map': {
'kb': 'kb',
'query': 'query',
},
'location_map': {
'kb': 'path',
'query': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/sparql-results+xml'
],
'content_type': [],
},
api_client=api_client,
callable=__kbs_kb_sparql_get
)
def __kbs_kb_sparql_post(
self,
kb,
body,
**kwargs
):
"""Perform SPARQL query contained in request body # noqa: E501
Perform SPARQL query using Owlet-style embedded class expression. This is not a complete SPARQL endpoint. It is for using Owlery as a federated query endpoint for a single Owlet triple pattern. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.kbs_kb_sparql_post(kb, body, async_req=True)
>>> result = thread.get()
Args:
kb (str): label for a knowledgebase in this Owlery
body (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['kb'] = \
kb
kwargs['body'] = \
body
return self.call_with_http_info(**kwargs)
self.kbs_kb_sparql_post = _Endpoint(
settings={
'response_type': None,
'auth': [],
'endpoint_path': '/kbs/{kb}/sparql',
'operation_id': 'kbs_kb_sparql_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'kb',
'body',
],
'required': [
'kb',
'body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'kb':
(str,),
'body':
(str,),
},
'attribute_map': {
'kb': 'kb',
},
'location_map': {
'kb': 'path',
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [
'application/sparql-query',
'application/x-www-form-urlencoded'
]
},
api_client=api_client,
callable=__kbs_kb_sparql_post
)
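# Minimal usage sketch for the generated client (assumes an Owlery server
# reachable through a default-configured ApiClient; 'my-kb' and the query
# are placeholders):
#
#   api = SPARQLApi()
#   api.kbs_kb_sparql_get(kb='my-kb', query='SELECT ?s WHERE { ?s ?p ?o }')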
| 35.657509
| 219
| 0.453644
| 1,734
| 19,469
| 4.868512
| 0.121684
| 0.029851
| 0.024639
| 0.025586
| 0.910448
| 0.900971
| 0.900971
| 0.900971
| 0.882729
| 0.872779
| 0
| 0.004116
| 0.463403
| 19,469
| 545
| 220
| 35.722936
| 0.803963
| 0.354975
| 0
| 0.698061
| 0
| 0
| 0.198899
| 0.039266
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01385
| false
| 0
| 0.01108
| 0
| 0.038781
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
502350f581d3924fb70e62c90af4ec84d86da448
| 13,809
|
py
|
Python
|
tests/test_script.py
|
szypkiwonsz/Cryptocurrency-Script
|
e9ec7803fd78c49a598bfb3c89c800c347d5eb34
|
[
"MIT"
] | null | null | null |
tests/test_script.py
|
szypkiwonsz/Cryptocurrency-Script
|
e9ec7803fd78c49a598bfb3c89c800c347d5eb34
|
[
"MIT"
] | null | null | null |
tests/test_script.py
|
szypkiwonsz/Cryptocurrency-Script
|
e9ec7803fd78c49a598bfb3c89c800c347d5eb34
|
[
"MIT"
] | 1
|
2021-09-29T18:34:11.000Z
|
2021-09-29T18:34:11.000Z
|
from datetime import datetime
from unittest.mock import patch, mock_open
import pytest
from click.testing import CliRunner
from dateutil.relativedelta import relativedelta
from httmock import HTTMock, all_requests
from script import average_price_by_month, consecutive_increase, export
from tests.conftest import api_content
@all_requests
def api_mock(url, request):
return {'status_code': 200,
'content': api_content}
runner = CliRunner()
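# CliRunner executes the click commands in-process, so exit codes and printed
# output can be asserted without spawning a subprocess.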
@pytest.mark.script
class TestAveragePriceByMonth:
def test_average_price_by_month(self, database):
with patch('database_handler.DatabaseHandler.db') as mock:
mock.return_value = database
with HTTMock(api_mock):
response = runner.invoke(average_price_by_month, ['--start_date=2012-01', '--end_date=2012-02'])
assert response.exit_code == 0
assert '5.48' in response.output
def test_average_price_by_month_diff_coin(self, database):
with patch('database_handler.DatabaseHandler.db') as mock:
mock.return_value = database
with HTTMock(api_mock):
response = runner.invoke(average_price_by_month, [
'--start_date=2012-01', '--end_date=2012-02', '--coin=usdt-tether'])
assert response.exit_code == 0
assert '5.48' in response.output
def test_average_price_by_month_too_early_start_date(self):
response = runner.invoke(average_price_by_month, ['--start_date=2008-12-31', '--end_date=2011-12'])
assert response.exit_code == 2
def test_average_price_by_month_too_early_start_date_previously_end_date(self):
response = runner.invoke(average_price_by_month, ['--end_date=2011-12', '--start_date=2008-12-31'])
assert response.exit_code == 2
def test_average_price_by_month_too_late_start_date(self):
next_month = datetime.now() + relativedelta(months=1)
response = runner.invoke(average_price_by_month, [f'--start_date={str(next_month)[:7]}', '--end_date=2011-12'])
assert response.exit_code == 2
def test_average_price_by_month_too_late_start_date_previously_end_date(self):
next_month = datetime.now() + relativedelta(months=1)
response = runner.invoke(average_price_by_month, ['--end_date=2011-12', f'--start_date={str(next_month)[:7]}'])
assert response.exit_code == 2
def test_average_price_by_month_too_late_end_date(self):
next_month = datetime.now() + relativedelta(months=1)
response = runner.invoke(average_price_by_month, ['--start_date=2012-01', f'--end_date={str(next_month)[:7]}'])
assert response.exit_code == 2
def test_average_price_by_month_too_late_end_date_previously_end_date(self):
next_month = datetime.now() + relativedelta(months=1)
response = runner.invoke(average_price_by_month, [f'--end_date={str(next_month)[:7]}', '--start_date=2012-01'])
assert response.exit_code == 2
def test_average_price_by_month_end_date_before_start_date(self):
response = runner.invoke(average_price_by_month, ['--start_date=2012-01', '--end_date=2011-12'])
assert response.exit_code == 2
def test_average_price_by_month_end_date_before_start_date_previously_end_date(self):
response = runner.invoke(average_price_by_month, ['--end_date=2011-12', '--start_date=2012-01'])
assert response.exit_code == 2
def test_average_price_by_month_wrong_arguments(self):
response = runner.invoke(average_price_by_month, ['--start_date=2012-01-01', '--end_date=2011-12-01'])
assert response.exit_code == 2
@pytest.mark.script
class TestConsecutiveIncrease:
    def test_consecutive_increase(self, database):
        with patch('database_handler.DatabaseHandler.db') as mock:
            mock.return_value = database
            with HTTMock(api_mock):
                response = runner.invoke(consecutive_increase, ['--start_date=2012-01-01', '--end_date=2012-02-03'])
                assert response.exit_code == 0
                assert '$17.66' in response.output

    def test_consecutive_increase_diff_coin(self, database):
        with patch('database_handler.DatabaseHandler.db') as mock:
            mock.return_value = database
            with HTTMock(api_mock):
                response = runner.invoke(consecutive_increase, [
                    '--start_date=2012-01-01', '--end_date=2012-02-03', '--coin=usdt-tether'
                ])
                assert response.exit_code == 0
                assert '$17.66' in response.output

    def test_consecutive_increase_by_month_too_early_start_date(self):
        response = runner.invoke(consecutive_increase, [
            '--start_date=2008-12-31', '--end_date=2011-12-01'
        ])
        assert response.exit_code == 2

    def test_consecutive_increase_by_month_too_early_start_date_previously_end_date(self):
        response = runner.invoke(consecutive_increase, [
            '--end_date=2011-12-01', '--start_date=2008-12-31'
        ])
        assert response.exit_code == 2

    def test_consecutive_increase_by_month_too_late_start_date(self):
        next_month = datetime.now() + relativedelta(months=1)
        response = runner.invoke(consecutive_increase, [
            f'--start_date={str(next_month)[:10]}', '--end_date=2011-12-01'
        ])
        assert response.exit_code == 2

    def test_consecutive_increase_by_month_too_late_start_date_previously_end_date(self):
        next_month = datetime.now() + relativedelta(months=1)
        response = runner.invoke(consecutive_increase, [
            '--end_date=2011-12-01', f'--start_date={str(next_month)[:10]}'
        ])
        assert response.exit_code == 2

    def test_consecutive_increase_too_late_end_date(self):
        next_month = datetime.now() + relativedelta(months=1)
        response = runner.invoke(consecutive_increase, [
            '--start_date=2012-01-01', f'--end_date={str(next_month)[:10]}'
        ])
        assert response.exit_code == 2

    def test_consecutive_increase_too_late_end_date_previously_end_date(self):
        next_month = datetime.now() + relativedelta(months=1)
        response = runner.invoke(consecutive_increase, [
            f'--end_date={str(next_month)[:10]}', '--start_date=2012-01-01'
        ])
        assert response.exit_code == 2

    def test_consecutive_increase_end_date_before_start_date(self):
        response = runner.invoke(consecutive_increase, ['--start_date=2012-01-01', '--end_date=2011-12-01'])
        assert response.exit_code == 2

    def test_consecutive_increase_end_date_before_start_date_previously_end_date(self):
        response = runner.invoke(consecutive_increase, ['--end_date=2011-12-01', '--start_date=2012-01-01'])
        assert response.exit_code == 2

    def test_consecutive_increase_wrong_arguments(self):
        response = runner.invoke(consecutive_increase, ['--start_date=2012-01', '--end_date=2011-12'])
        assert response.exit_code == 2
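
# The export tests additionally patch data_exporters.open with mock_open() so
# the expected file name and mode can be asserted without writing to disk.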
@pytest.mark.script
class TestExport:
    def test_export_json(self, database):
        with patch('database_handler.DatabaseHandler.db') as mock:
            mock.return_value = database
            with HTTMock(api_mock):
                with patch('data_exporters.open', mock_open()) as mocked_file:
                    response = runner.invoke(export, [
                        '--start_date=2012-01-01', '--end_date=2012-02-03', '--format_type=json', '--file=data.json'
                    ])
                    mocked_file.assert_called_once_with('data.json', 'w')
                    assert response.exit_code == 0
                    assert 'data.json' in response.output

    def test_export_json_diff_coin(self, database):
        with patch('database_handler.DatabaseHandler.db') as mock:
            mock.return_value = database
            with HTTMock(api_mock):
                with patch('data_exporters.open', mock_open()) as mocked_file:
                    response = runner.invoke(export, [
                        '--start_date=2012-01-02', '--end_date=2012-02-04', '--format_type=json', '--file=data.json',
                        '--coin=usdt-tether'
                    ])
                    mocked_file.assert_called_once_with('data.json', 'w')
                    assert response.exit_code == 0
                    assert 'data.json' in response.output

    def test_export_csv(self, database):
        with patch('database_handler.DatabaseHandler.db') as mock:
            mock.return_value = database
            with HTTMock(api_mock):
                with patch('data_exporters.open', mock_open()) as mocked_file:
                    response = runner.invoke(export, [
                        '--start_date=2012-01-01', '--end_date=2012-02-03', '--format_type=csv', '--file=data.csv'
                    ])
                    mocked_file.assert_called_once_with('data.csv', 'w', newline='')
                    assert response.exit_code == 0
                    assert 'data.csv' in response.output

    def test_export_csv_diff_coin(self, database):
        with patch('database_handler.DatabaseHandler.db') as mock:
            mock.return_value = database
            with HTTMock(api_mock):
                with patch('data_exporters.open', mock_open()) as mocked_file:
                    response = runner.invoke(export, [
                        '--start_date=2012-01-02', '--end_date=2012-02-04', '--format_type=csv', '--file=data.csv',
                        '--coin=usdt-tether'
                    ])
                    mocked_file.assert_called_once_with('data.csv', 'w', newline='')
                    assert response.exit_code == 0
                    assert 'data.csv' in response.output

    def test_export_json_without_extension(self, database):
        with patch('database_handler.DatabaseHandler.db') as mock:
            mock.return_value = database
            with HTTMock(api_mock):
                with patch('data_exporters.open', mock_open()) as mocked_file:
                    response = runner.invoke(export, [
                        '--start_date=2012-01-01', '--end_date=2012-02-03', '--format_type=json', '--file=data'
                    ])
                    mocked_file.assert_called_once_with('data.json', 'w')
                    assert response.exit_code == 0
                    assert 'data.json' in response.output

    def test_export_csv_without_extension(self, database):
        with patch('database_handler.DatabaseHandler.db') as mock:
            mock.return_value = database
            with HTTMock(api_mock):
                with patch('data_exporters.open', mock_open()) as mocked_file:
                    response = runner.invoke(export, [
                        '--start_date=2012-01-02', '--end_date=2012-02-04', '--format_type=csv', '--file=data',
                        '--coin=usdt-tether'
                    ])
                    mocked_file.assert_called_once_with('data.csv', 'w', newline='')
                    assert response.exit_code == 0
                    assert 'data.csv' in response.output

    def test_export_by_month_too_early_start_date(self):
        response = runner.invoke(export, [
            '--start_date=2008-12-31', '--end_date=2011-12-01', '--format_type=json', '--file=data.json'
        ])
        assert response.exit_code == 2

    def test_export_by_month_too_early_start_date_previously_end_date(self):
        response = runner.invoke(export, [
            '--end_date=2011-12-01', '--start_date=2008-12-31', '--format_type=json', '--file=data.json'
        ])
        assert response.exit_code == 2

    def test_export_by_month_too_late_start_date(self):
        next_month = datetime.now() + relativedelta(months=1)
        response = runner.invoke(export, [
            f'--start_date={str(next_month)[:10]}', '--end_date=2011-12-01', '--format_type=json', '--file=data.json'
        ])
        assert response.exit_code == 2

    def test_export_by_month_too_late_start_date_previously_end_date(self):
        next_month = datetime.now() + relativedelta(months=1)
        response = runner.invoke(export, [
            '--end_date=2011-12-01', f'--start_date={str(next_month)[:10]}', '--format_type=json', '--file=data.json'
        ])
        assert response.exit_code == 2

    def test_export_too_late_end_date(self):
        next_month = datetime.now() + relativedelta(months=1)
        response = runner.invoke(export, [
            '--start_date=2012-01-01', f'--end_date={str(next_month)[:10]}', '--format_type=json', '--file=data.json'
        ])
        assert response.exit_code == 2

    def test_export_too_late_end_date_previously_end_date(self):
        next_month = datetime.now() + relativedelta(months=1)
        response = runner.invoke(export, [
            f'--end_date={str(next_month)[:10]}', '--start_date=2012-01-01',
            '--format_type=json', '--file=data.json'
        ])
        assert response.exit_code == 2

    def test_export_end_date_before_start_date_previously_end_date(self):
        response = runner.invoke(export, [
            '--end_date=2011-12-01', '--start_date=2012-01-01', '--format_type=json', '--file=data.json'
        ])
        assert response.exit_code == 2

    def test_export_end_date_before_start_date(self):
        response = runner.invoke(export, [
            '--start_date=2012-01-01', '--end_date=2011-12-01', '--format_type=json', '--file=data.json'
        ])
        assert response.exit_code == 2

    def test_export_wrong_arguments(self):
        response = runner.invoke(export, [
            '--start_date=2012-01-01', '--end_date=2011-12-01', '--format_type=wrong', '--file=data.json'
        ])
        assert response.exit_code == 2
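
# --- Illustrative only: a minimal sketch of the fixtures these tests assume.
# The real `database` fixture and `api_mock` are defined elsewhere in the test
# suite; the module names, netloc pattern, and payload below are hypothetical.
#
# import mongomock
# import pytest
# from httmock import urlmatch
#
# @pytest.fixture
# def database():
#     # In-memory MongoDB stand-in returned by the patched DatabaseHandler.db.
#     return mongomock.MongoClient().prices
#
# @urlmatch(netloc=r'(.*\.)?api\.example\.com$')
# def api_mock(url, request):
#     # Canned response standing in for the remote price API.
#     return {'status_code': 200, 'content': b'{}'}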
| 47.782007 | 119 | 0.634079 | 1,736 | 13,809 | 4.735023 | 0.061636 | 0.051946 | 0.090024 | 0.099027 | 0.941241 | 0.941241 | 0.928467 | 0.920803 | 0.91618 | 0.905961 | 0 | 0.052436 | 0.240423 | 13,809 | 288 | 120 | 47.947917 | 0.731242 | 0 | 0 | 0.615063 | 0 | 0 | 0.211167 | 0.128032 | 0 | 0 | 0 | 0 | 0.221757 | 1 | 0.158996 | false | 0 | 0.033473 | 0.004184 | 0.209205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
504f922e0d685b4f28e9ff1bb793159169019aa2 | 43 | py | Python | ex_package/ex_package.py | kungfupanda92/python-sample-package | 3d2d086514a66e0b46f5fd40f5b835f3679936b4 | ["MIT"] | null | null | null | ex_package/ex_package.py | kungfupanda92/python-sample-package | 3d2d086514a66e0b46f5fd40f5b835f3679936b4 | ["MIT"] | null | null | null | ex_package/ex_package.py | kungfupanda92/python-sample-package | 3d2d086514a66e0b46f5fd40f5b835f3679936b4 | ["MIT"] | null | null | null |
def hello_print():
    print("hello baby")
| 14.333333 | 23 | 0.651163 | 6 | 43 | 4.5 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.186047 | 43 | 2 | 24 | 21.5 | 0.771429 | 0 | 0 | 0 | 0 | 0 | 0.232558 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0 | 0.5 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
505fc8437fc0c3c250458093521a57f2e9a45d55 | 21,479 | py | Python | service/normalizer.py | chdigiorno/georef-ar-api | 951d8804e286b95de5dc63b6a04d792052c1b553 | ["MIT"] | 1 | 2020-04-29T03:43:05.000Z | 2020-04-29T03:43:05.000Z | service/normalizer.py | chdigiorno/georef-ar-api | 951d8804e286b95de5dc63b6a04d792052c1b553 | ["MIT"] | null | null | null | service/normalizer.py | chdigiorno/georef-ar-api | 951d8804e286b95de5dc63b6a04d792052c1b553 | ["MIT"] | null | null | null |
"""Módulo 'normalizer' de georef-ar-api
Contiene funciones que manejan la lógica de procesamiento
de los recursos que expone la API.
"""
import logging
from flask import current_app
from service import data, params, formatter, address, location, utils
from service import names as N
from service.query_result import QueryResult
logger = logging.getLogger('georef')
def get_elasticsearch():
"""Devuelve la conexión a Elasticsearch activa para la sesión
de flask. La conexión es creada si no existía.
Returns:
Elasticsearch: conexión a Elasticsearch.
Raises:
data.DataConnectionException: En caso de ocurrir un error de
conexión con la capa de manejo de datos.
"""
    if not hasattr(current_app, 'elasticsearch'):
        current_app.elasticsearch = data.elasticsearch_connection(
            hosts=current_app.config['ES_HOSTS'],
            sniff=current_app.config['ES_SNIFF'],
            sniffer_timeout=current_app.config['ES_SNIFFER_TIMEOUT']
        )

    return current_app.elasticsearch
def _process_entity_single(request, name, param_parser, key_translations):
    """Processes a GET request that queries data for an entity.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask GET request.
        name (str): Name of the entity.
        param_parser (ParameterSet): Object used to parse the parameters.
        key_translations (dict): Key translations used to convert the user's
            parameter dictionary into a dictionary representing an
            Elasticsearch query.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-handling layer.

    Returns:
        flask.Response: HTTP response

    """
    try:
        qs_params = param_parser.parse_get_params(request.args)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_single(e.errors, e.fmt)

    # Build the query from the parsed parameters
    query = utils.translate_keys(qs_params, key_translations,
                                 ignore=[N.FLATTEN, N.FORMAT])

    # Build the formatting rules from the parsed parameters
    fmt = {
        key: qs_params[key]
        for key in [N.FLATTEN, N.FIELDS, N.FORMAT]
        if key in qs_params
    }

    if fmt[N.FORMAT] == 'shp':
        query['fields'] += (N.GEOM,)

    es = get_elasticsearch()
    search_class = data.entity_search_class(name)
    search = search_class(query)

    data.ElasticsearchSearch.run_searches(es, [search])

    query_result = QueryResult.from_entity_list(search.result.hits,
                                                search.result.total,
                                                search.result.offset)

    return formatter.create_ok_response(name, query_result, fmt)


def _process_entity_bulk(request, name, param_parser, key_translations):
    """Processes a POST request that queries data for a list of entities.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask POST request.
        name (str): Name of the entity.
        param_parser (ParameterSet): Object used to parse the parameters.
        key_translations (dict): Key translations used to convert the user's
            parameter dictionaries into a list of dictionaries representing
            the Elasticsearch queries.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-handling layer.

    Returns:
        flask.Response: HTTP response

    """
    try:
        body_params = param_parser.parse_post_params(
            request.args, request.json, name)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_bulk(e.errors)

    queries = []
    formats = []
    for parsed_params in body_params:
        # Build the query from the parsed parameters
        query = utils.translate_keys(parsed_params, key_translations,
                                     ignore=[N.FLATTEN, N.FORMAT])

        # Build the formatting rules from the parsed parameters
        fmt = {
            key: parsed_params[key]
            for key in [N.FLATTEN, N.FIELDS]
            if key in parsed_params
        }

        queries.append(query)
        formats.append(fmt)

    es = get_elasticsearch()
    search_class = data.entity_search_class(name)
    searches = [search_class(query) for query in queries]

    data.ElasticsearchSearch.run_searches(es, searches)

    query_results = [
        QueryResult.from_entity_list(search.result.hits,
                                     search.result.total,
                                     search.result.offset)
        for search in searches
    ]

    return formatter.create_ok_response_bulk(name, query_results, formats)
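
# For example, with process_state's key_translations below, user parameters
# such as {N.ID: '70', N.MAX: 10} are translated into the Elasticsearch query
# {'ids': '70', 'size': 10}, while N.FLATTEN and N.FORMAT are diverted into
# the formatting rules instead of the query.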
def _process_entity(request, name, param_parser, key_translations):
    """Processes a GET or POST request that queries data for an entity.
    If a parsing error occurs, an HTTP 400 response is returned.
    If an internal error occurs, an HTTP 500 response is returned.

    Args:
        request (flask.Request): Flask GET or POST request.
        name (str): Name of the entity.
        param_parser (ParameterSet): Object used to parse the parameters.
        key_translations (dict): Key translations used to convert the user's
            parameter dictionaries into a list of dictionaries representing
            the Elasticsearch queries.

    Returns:
        flask.Response: HTTP response

    """
    try:
        if request.method == 'GET':
            return _process_entity_single(request, name, param_parser,
                                          key_translations)

        return _process_entity_bulk(request, name, param_parser,
                                    key_translations)
    except data.DataConnectionException:
        logger.exception(
            'Exception while handling query for resource: {}'.format(name))
        return formatter.create_internal_error_response()


def process_state(request):
    """Processes a GET or POST request that queries province data.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response

    """
    return _process_entity(request, N.STATES, params.PARAMS_STATES, {
        N.ID: 'ids',
        N.NAME: 'name',
        N.INTERSECTION: 'geo_shape_ids',
        N.EXACT: 'exact',
        N.ORDER: 'order',
        N.FIELDS: 'fields',
        N.OFFSET: 'offset',
        N.MAX: 'size'
    })


def process_department(request):
    """Processes a GET or POST request that queries department data.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response

    """
    return _process_entity(
        request, N.DEPARTMENTS,
        params.PARAMS_DEPARTMENTS, {
            N.ID: 'ids',
            N.NAME: 'name',
            N.INTERSECTION: 'geo_shape_ids',
            N.STATE: 'state',
            N.EXACT: 'exact',
            N.ORDER: 'order',
            N.FIELDS: 'fields',
            N.OFFSET: 'offset',
            N.MAX: 'size'
        })


def process_municipality(request):
    """Processes a GET or POST request that queries municipality data.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response

    """
    return _process_entity(
        request, N.MUNICIPALITIES,
        params.PARAMS_MUNICIPALITIES, {
            N.ID: 'ids',
            N.NAME: 'name',
            N.INTERSECTION: 'geo_shape_ids',
            N.STATE: 'state',
            N.EXACT: 'exact',
            N.ORDER: 'order',
            N.FIELDS: 'fields',
            N.OFFSET: 'offset',
            N.MAX: 'size'
        })


def process_locality(request):
    """Processes a GET or POST request that queries locality data.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response

    """
    return _process_entity(request, N.LOCALITIES, params.PARAMS_LOCALITIES, {
        N.ID: 'ids',
        N.NAME: 'name',
        N.STATE: 'state',
        N.DEPT: 'department',
        N.MUN: 'municipality',
        N.EXACT: 'exact',
        N.ORDER: 'order',
        N.FIELDS: 'fields',
        N.OFFSET: 'offset',
        N.MAX: 'size'
    })
def _build_street_query_format(parsed_params):
    """Builds two dictionaries from the received query parameters: the first
    one represents the Elasticsearch query to run, and the second one the
    formatting (presentation) properties to apply to the data obtained from
    it.

    Args:
        parsed_params (dict): Parameters of a query for the streets index.

    Returns:
        tuple: query dictionary and format dictionary

    """
    # Build the query from the parsed parameters
    query = utils.translate_keys(parsed_params, {
        N.ID: 'ids',
        N.NAME: 'name',
        N.INTERSECTION: 'geo_shape_ids',
        N.STATE: 'state',
        N.DEPT: 'department',
        N.EXACT: 'exact',
        N.FIELDS: 'fields',
        N.CATEGORY: 'category',
        N.OFFSET: 'offset',
        N.ORDER: 'order',
        N.MAX: 'size'
    }, ignore=[N.FLATTEN, N.FORMAT])

    # Build the formatting rules from the parsed parameters
    fmt = {
        key: parsed_params[key]
        for key in [N.FLATTEN, N.FIELDS, N.FORMAT]
        if key in parsed_params
    }

    return query, fmt


def _process_street_single(request):
    """Processes a GET request that queries street data.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask GET request.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-handling layer.

    Returns:
        flask.Response: HTTP response

    """
    try:
        qs_params = params.PARAMS_STREETS.parse_get_params(request.args)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_single(e.errors, e.fmt)

    query, fmt = _build_street_query_format(qs_params)
    if fmt[N.FORMAT] == 'shp':
        query['fields'] += (N.GEOM,)

    es = get_elasticsearch()
    search = data.StreetsSearch(query)

    data.ElasticsearchSearch.run_searches(es, [search])

    query_result = QueryResult.from_entity_list(search.result.hits,
                                                search.result.total,
                                                search.result.offset)

    return formatter.create_ok_response(N.STREETS, query_result, fmt)


def _process_street_bulk(request):
    """Processes a POST request that queries street data.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask POST request.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-handling layer.

    Returns:
        flask.Response: HTTP response

    """
    try:
        body_params = params.PARAMS_STREETS.parse_post_params(
            request.args, request.json, N.STREETS)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_bulk(e.errors)

    queries = []
    formats = []
    for parsed_params in body_params:
        query, fmt = _build_street_query_format(parsed_params)
        queries.append(query)
        formats.append(fmt)

    es = get_elasticsearch()
    searches = [data.StreetsSearch(query) for query in queries]

    data.ElasticsearchSearch.run_searches(es, searches)

    query_results = [
        QueryResult.from_entity_list(search.result.hits,
                                     search.result.total,
                                     search.result.offset)
        for search in searches
    ]

    return formatter.create_ok_response_bulk(N.STREETS, query_results, formats)


def process_street(request):
    """Processes a GET or POST request that queries street data.
    If a parsing error occurs, an HTTP 400 response is returned.
    If an internal error occurs, an HTTP 500 response is returned.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response

    """
    try:
        if request.method == 'GET':
            return _process_street_single(request)

        return _process_street_bulk(request)
    except data.DataConnectionException:
        logger.exception(
            'Exception while handling query for resource: streets')
        return formatter.create_internal_error_response()
def _build_address_query_format(parsed_params):
    """Builds two dictionaries from the received query parameters: the first
    one represents the Elasticsearch query to run, and the second one the
    formatting (presentation) properties to apply to the data obtained from
    it.

    Args:
        parsed_params (dict): Parameters of an address normalization query.

    Returns:
        tuple: query dictionary and format dictionary

    """
    # Build the query from the parsed parameters
    query = utils.translate_keys(parsed_params, {
        N.DEPT: 'department',
        N.STATE: 'state',
        N.EXACT: 'exact',
        N.OFFSET: 'offset',
        N.ORDER: 'order',
        N.MAX: 'size'
    }, ignore=[N.FLATTEN, N.FORMAT, N.FIELDS])

    # Build the formatting rules from the parsed parameters
    fmt = {
        key: parsed_params[key]
        for key in [N.FLATTEN, N.FIELDS, N.FORMAT]
        if key in parsed_params
    }

    return query, fmt


def _process_address_queries(params_list):
    """Runs a list of address queries, starting from the parameters received
    from the user.

    Args:
        params_list (list): List of dicts, each one containing the parameters
            of a single query to the API's addresses resource.

    Returns:
        tuple: Tuple of (list, list), where the first list contains one
            QueryResult instance per query, and the second list contains one
            dict instance used later to format each result.

    """
    queries = []
    formats = []
    for parsed_params in params_list:
        query, fmt = _build_address_query_format(parsed_params)
        queries.append(query)
        formats.append(fmt)

    es = get_elasticsearch()
    query_results = address.run_address_queries(es, queries, formats)

    return query_results, formats
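
# Note: unlike the entity and street resources above, address queries are
# delegated to address.run_address_queries rather than being run directly
# through data.ElasticsearchSearch.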
def _process_address_single(request):
    """Processes a GET request that normalizes an address.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask GET request.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-handling layer.

    Returns:
        flask.Response: HTTP response

    """
    try:
        qs_params = params.PARAMS_ADDRESSES.parse_get_params(request.args)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_single(e.errors, e.fmt)

    query_results, formats = _process_address_queries([qs_params])

    return formatter.create_ok_response(N.ADDRESSES, query_results[0],
                                        formats[0])


def _process_address_bulk(request):
    """Processes a POST request that normalizes a batch of addresses.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask POST request.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-handling layer.

    Returns:
        flask.Response: HTTP response

    """
    try:
        body_params = params.PARAMS_ADDRESSES.parse_post_params(
            request.args, request.json, N.ADDRESSES)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_bulk(e.errors)

    query_results, formats = _process_address_queries(body_params)

    return formatter.create_ok_response_bulk(N.ADDRESSES, query_results,
                                             formats)


def process_address(request):
    """Processes a GET or POST request that normalizes one or more addresses.
    If a parsing error occurs, an HTTP 400 response is returned.
    If an internal error occurs, an HTTP 500 response is returned.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response

    """
    try:
        if request.method == 'GET':
            return _process_address_single(request)

        return _process_address_bulk(request)
    except data.DataConnectionException:
        logger.exception(
            'Exception while handling query for resource: addresses')
        return formatter.create_internal_error_response()
def _build_location_query_format(parsed_params):
    """Builds two dictionaries from the received query parameters: the first
    one represents the Elasticsearch query to run, and the second one the
    formatting (presentation) properties to apply to the data obtained from
    it.

    Args:
        parsed_params (dict): Parameters of a location query.

    Returns:
        tuple: query dictionary and format dictionary

    """
    # Build the query from the parsed parameters
    query = utils.translate_keys(parsed_params, {}, ignore=[N.FLATTEN,
                                                            N.FORMAT])

    # Build the formatting rules from the parsed parameters
    fmt = {
        key: parsed_params[key]
        for key in [N.FLATTEN, N.FIELDS, N.FORMAT]
        if key in parsed_params
    }

    return query, fmt


def _process_location_single(request):
    """Processes a GET request that looks up entities at a point.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask GET request.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-handling layer.

    Returns:
        flask.Response: HTTP response

    """
    try:
        qs_params = params.PARAMS_LOCATION.parse_get_params(request.args)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_single(e.errors, e.fmt)

    query, fmt = _build_location_query_format(qs_params)

    es = get_elasticsearch()
    result = location.run_location_queries(es, [query])[0]

    return formatter.create_ok_response(N.LOCATION, result, fmt)


def _process_location_bulk(request):
    """Processes a POST request that looks up entities at several points.
    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask POST request.

    Raises:
        data.DataConnectionException: if a connection error occurs in the
            data-handling layer.

    Returns:
        flask.Response: HTTP response

    """
    try:
        body_params = params.PARAMS_LOCATION.parse_post_params(
            request.args, request.json, N.LOCATIONS)
    except params.ParameterParsingException as e:
        return formatter.create_param_error_response_bulk(e.errors)

    queries = []
    formats = []
    for parsed_params in body_params:
        query, fmt = _build_location_query_format(parsed_params)
        queries.append(query)
        formats.append(fmt)

    es = get_elasticsearch()
    results = location.run_location_queries(es, queries)

    return formatter.create_ok_response_bulk(N.LOCATION, results, formats)


def process_location(request):
    """Processes a GET or POST request that looks up entities at one or more
    locations.
    If a parsing error occurs, an HTTP 400 response is returned.
    If an internal error occurs, an HTTP 500 response is returned.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response

    """
    try:
        if request.method == 'GET':
            return _process_location_single(request)

        return _process_location_bulk(request)
    except data.DataConnectionException:
        logger.exception(
            'Exception while handling query for resource: location')
        return formatter.create_internal_error_response()
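
# --- Illustrative only: how these handlers might be mounted on Flask routes.
# A minimal sketch under assumptions: the blueprint name and URL rule are
# hypothetical and not taken from georef-ar-api itself.
#
# from flask import Blueprint, request
# from service import normalizer
#
# bp = Blueprint('georef', __name__)
#
# @bp.route('/provincias', methods=['GET', 'POST'])
# def states():
#     # Each process_* handler parses the request itself and returns a
#     # complete flask.Response, so the view is a one-liner.
#     return normalizer.process_state(request)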
| 31.962798 | 79 | 0.655058 | 2,630 | 21,479 | 5.221673 | 0.092776 | 0.034079 | 0.016894 | 0.031676 | 0.848176 | 0.824656 | 0.796403 | 0.767567 | 0.742882 | 0.728828 | 0 | 0.004032 | 0.272592 | 21,479 | 671 | 80 | 32.010432 | 0.874936 | 0.409377 | 0 | 0.623693 | 0 | 0 | 0.051532 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.017422 | 0 | 0.219512 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |