Dataset schema (113 columns, reconstructed from the flattened header). Sample rows follow; each row's `content` is shown as a fenced code block, and the remaining numeric columns are listed in schema order.

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
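For orientation, here is a minimal sketch of how rows with this schema could be loaded and inspected. The file name `sample_rows.jsonl` and the use of pandas are illustrative assumptions, not part of the dataset:

```python
import pandas as pd

# Hypothetical dump of rows matching the schema above, one JSON object per line.
df = pd.read_json("sample_rows.jsonl", lines=True)

# A few identity columns.
print(df[["hexsha", "size", "ext", "lang"]].head())

# The *_quality_signal columns hold the per-file quality measurements;
# summarize only the numeric ones (one column is bool, one is null-typed).
signal_cols = [c for c in df.columns if c.endswith("_quality_signal")]
print(df[signal_cols].select_dtypes("number").describe().T[["mean", "min", "max"]])
```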
Row 1
- hexsha: 41cd8d56f4d24e6552e6fd98e710360e73235678 | size: 54 | ext: py | lang: Python
- max_stars: tianshou/core/value_function/__init__.py @ Suffoquer-fang/tianshou, head d59d6cafe9f5d75789bdbed7fad4cd4b79877f4f, licenses ["MIT"], count null, events null to null
- max_issues: same path/repo/head/licenses, count null, events null to null
- max_forks: same path/repo/head/licenses, count null, events null to null
- content:

```python
from .state_value import *
from .action_value import *
```
- avg_line_length: 27 | max_line_length: 27 | alphanum_fraction: 0.796296
- quality signals (schema order): 8 | 54 | 5.125 | 0.625 | 0.536585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12963 | 54 | 2 | 27 | 27 | 0.87234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
- raw signal values (schema order): 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
- effective: 0 | hits: 6
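Several of the surface statistics in the row above can be recomputed straight from `content`. A sketch under assumed definitions (the pipeline's exact counting rules are guesses here; matching 0.796296 requires counting underscores as word characters, and the stored max_line_length of 27 suggests the newline is included):

```python
content = "from .state_value import *\nfrom .action_value import *\n"

lines = content.splitlines()
# Total bytes divided by line count: 54 / 2 = 27.0, as stored.
avg_line_length = len(content) / len(lines)

# Counting '_' as alphanumeric gives 43/54 = 0.796296, matching the row;
# that inclusion is an assumption about the pipeline's definition.
alnum = sum(ch.isalnum() or ch == "_" for ch in content)
alphanum_fraction = alnum / len(content)

print(avg_line_length, round(alphanum_fraction, 6))
```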
Row 2
- hexsha: ec2ad7313826be3642c4e71d012a07623282fc8c | size: 6,257 | ext: py | lang: Python
- max_stars: tests/test_api.py @ jbrentfoster/wae-epnm-collector, head 93315ee286084232d823c2bd66a850c13bdee838, licenses ["MIT"], count null, events null to null
- max_issues: same path/repo/head/licenses, count null, events null to null
- max_forks: same path/repo/head/licenses, count 2, events 2019-02-18T16:43:55.000Z to 2020-02-21T23:54:57.000Z
- content:

```python
# Tests to check the api calls being made by the script
import pytest
import requests
import json
import time
import logging
import responses
from collectioncode import utils, collect
from mock import Mock, patch
# Setting up the vars for use in the api calls
baseurl = "https://10.135.7.222/restconf"
# uri = "/data/v1/cisco-resource-network:topological-link?topo-layer=ots-link-layer&.startIndex=0"
# Updating URI to fix the performance TAC case
uri = "/data/v1/cisco-resource-network:topological-link?topo-layer=ots-link-layer&.skipFiberAttributes=true&.skipPerformanceMetrics=true&.startIndex=0"
user = 'root'
password = 'Epnm1234'
Logger = logging.getLogger(__name__)
def setup_module(utils):
collect.thread_data.logger = Logger
# The test names should be descriptive enough
def test_api_get_successful_response():
circuit_breaker1 = utils.Circuit_breaker(timeout_limit=15)
response = circuit_breaker1.request(baseurl, uri, user, password)
assert len(response) > 0
# Test retrieving the MPLS topology, iterating over potential nodes
def test_api_get_mpls_response():
circuit_breaker1 = utils.Circuit_breaker(timeout_limit=15)
state_or_states = ["Delaware", "New York", "New Jersey"]
collect.collect_mpls_topo_json(baseurl, user, password, state_or_states)
uri = '/operations/v1/cisco-resource-activation:run-cli-configuration'
thejson = '{"ra.run-cli-configuration": {"ra.target-list": {"ra.target": {"ra.node-ref": "MD=CISCO_EPNM!ND=NCS4K-Site1"}}, "ra.template-name": "show mpls traffic-eng topology"}}'
response = utils.rest_post_json(baseurl, uri, thejson, user, password)
assert len(response) > 0
# Test retrieving hostnames, iterating over potential nodes
def test_api_get_hostname_response():
circuit_breaker1 = utils.Circuit_breaker(timeout_limit=15)
state_or_states = ["Delaware", "New York", "New Jersey"]
collect.collect_hostnames_json(baseurl, user, password, state_or_states)
uri = '/operations/v1/cisco-resource-activation:run-cli-configuration'
thejson = '{"ra.run-cli-configuration": {"ra.target-list": {"ra.target": {"ra.node-ref": "MD=CISCO_EPNM!ND=NCS4K-Site1"}}, "ra.template-name": "show mpls traffic-eng topology"}}'
response = utils.rest_post_json(baseurl, uri, thejson, user, password)
assert len(response) > 0
# Setting the timeout value low to force the exception
def test_api_timeout_return_null():
circuit_breaker1 = utils.Circuit_breaker(timeout_limit=0)
# with pytest.raises(Exception):
# assert circuit_breaker1.request(baseurl, uri, user, password)
response = circuit_breaker1.request(baseurl, uri, user, password)
assert response == '[]'
# Delaying for the max amount of 60 seconds
def test_api_for_none_timeout_value():
resp1 = {
"test": ["Not empty"]
}
baseurl = "https://run.mocky.io/v3/fca0a56e-29b0-41fc-8f2d-5ad7f5eed27e"
uri = "?mocky-delay=20s"
circuit_breaker1 = utils.Circuit_breaker(timeout_limit=None)
response = circuit_breaker1.request(baseurl, uri, user, password)
assert json.loads(response) == resp1
# Delaying for the max amount of 60 seconds
def test_api_for_bad_timeout_value_with_max_delay():
baseurl = "https://run.mocky.io/v3/fca0a56e-29b0-41fc-8f2d-5ad7f5eed27e"
uri = "?mocky-delay=20s"
circuit_breaker1 = utils.Circuit_breaker(timeout_limit=5)
response = circuit_breaker1.request(baseurl, uri, user, password)
assert response == '[]'
# Delaying for the max amount of 60 seconds
def test_api_for_good_timeout_value_with_max_delay():
resp1 = {
"test": ["Not empty"]
}
baseurl = "https://run.mocky.io/v3/fca0a56e-29b0-41fc-8f2d-5ad7f5eed27e"
uri = "?mocky-delay=20s"
circuit_breaker1 = utils.Circuit_breaker(timeout_limit=30)
response = circuit_breaker1.request(baseurl, uri, user, password)
assert json.loads(response) == resp1
# @responses.activate
# def test_api_for_none_timeout_value():
# resp1 = {
# "test": ["Not empty"]
# }
# def request_callback(request):
# time.sleep(5)
# return(200, 'header', resp1)
# responses.add_callback(responses.GET, baseurl + uri, callback=request_callback)
# circuit_breaker1 = utils.Circuit_breaker(timeout_limit=10)
# response = circuit_breaker1.request(baseurl, uri, user, password)
# assert response.json() == resp1
# @patch.object(requests, 'get')
# def test_api_for_none_timeout_value(mock_request):
# resp1 = {
# "test": ["Not empty"]
# }
# def waiting():
# time.sleep(15)
# return resp1
# mockResponse = Mock()
# mockResponse.json.return_value = resp1
# mockResponse.status_code = 200
# mock_request.return_value = mockResponse
# mock_request.side_effect = waiting()
# circuit_breaker1 = utils.Circuit_breaker(timeout_limit=30)
# response = circuit_breaker1.request(baseurl, uri, user, password)
# assert json.loads(response) == resp1
@patch.object(requests, 'get')
def test_api_for_nonempty_response(mock_request):
resp1 = {
"test": ["Not empty"]
}
mockResponse = Mock()
mockResponse.json.return_value = resp1
mockResponse.status_code = 200
mock_request.return_value = mockResponse
circuit_breaker1 = utils.Circuit_breaker()
response = circuit_breaker1.request(baseurl, uri, user, password)
assert json.loads(response) == resp1
@patch.object(requests, 'get')
def test_api_for_empty_response(mock_request):
resp1 = {
"test": []
}
mockResponse = Mock()
mockResponse.json.return_value = resp1
mockResponse.status_code = 200
mock_request.return_value = mockResponse
circuit_breaker1 = utils.Circuit_breaker()
response = circuit_breaker1.request(baseurl, uri, user, password)
assert response == '[]'
@patch.object(requests, 'get')
def test_api_for_error_status_code(mock_request):
resp1 = {
"test": ["Not empty"]
}
mockResponse = Mock()
mockResponse.json.return_value = resp1
mockResponse.status_code = 400
mock_request.return_value = mockResponse
circuit_breaker1 = utils.Circuit_breaker()
response = circuit_breaker1.request(baseurl, uri, user, password)
assert response == '[]'
```
- avg_line_length: 35.551136 | max_line_length: 182 | alphanum_fraction: 0.717756
- quality signals (schema order): 797 | 6,257 | 5.441656 | 0.217064 | 0.079548 | 0.068942 | 0.074706 | 0.794789 | 0.774037 | 0.767581 | 0.732534 | 0.724464 | 0.658981 | 0 | 0.030431 | 0.164935 | 6,257 | 176 | 183 | 35.551136 | 0.799617 | 0.271376 | 0 | 0.616162 | 0 | 0.030303 | 0.219788 | 0.0861 | 0 | 0 | 0 | 0 | 0.10101 | 1 | 0.111111 | false | 0.131313 | 0.080808 | 0 | 0.191919 | 0
- raw signal values (schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0
- effective: 0 | hits: 6
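The dupe-n-gram signals in the row above (e.g., 0.794789 for duplicated 5-grams) measure how much of a file is covered by repeated word n-grams. One plausible reading, as a self-contained sketch; the production tokenizer and character-accounting rules are assumptions:

```python
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int) -> float:
    """Fraction of word characters covered by n-grams occurring more than once."""
    words = text.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    # Mark word positions covered by any duplicated n-gram.
    covered = [False] * len(words)
    for i, g in enumerate(ngrams):
        if counts[g] > 1:
            for j in range(i, i + n):
                covered[j] = True
    total = sum(len(w) for w in words)
    dupe = sum(len(w) for w, c in zip(words, covered) if c)
    return dupe / total if total else 0.0

print(frac_chars_dupe_ngrams("a b c a b c d", 2))  # toy example: 6/7
```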
Row 3
- hexsha: ec453f872527424f3ec293e836241fd17a4d2f99 | size: 2,022 | ext: py | lang: Python
- max_stars: done/flask-greet-calc/calc/app.py @ demohack/yute, head 2fb136118733394e3595bf707cb32f1b7b2aede0, licenses ["MIT"], count null, events null to null
- max_issues: same path/repo/head/licenses, count 17, events 2021-03-24T14:59:50.000Z to 2022-03-05T23:52:31.000Z
- max_forks: same path/repo/head/licenses, count null, events null to null
- content:

```python
from flask import Flask, request
app = Flask(__name__)
@app.route('/add', methods = ['POST', 'GET'])
def add(a=0, b=0):
"""Adds a and b and returns result as the body"""
if request.method == 'POST':
a = request.form['a']
b = request.form['b']
elif request.method == 'GET':
a = request.args.get('a')
b = request.args.get('b')
a = int(a)
b = int(b)
html = f"{a + b}"
return html
@app.route('/sub', methods = ['POST', 'GET'])
def sub(a=0, b=0):
"""subtract b from a and returns result as the body"""
if request.method == 'POST':
a = request.form['a']
b = request.form['b']
elif request.method == 'GET':
a = request.args.get('a')
b = request.args.get('b')
a = int(a)
b = int(b)
html = f"{a - b}"
return html
@app.route('/mult', methods = ['POST', 'GET'])
def mult(a=0, b=0):
"""multiply a and b; returns result as the body"""
if request.method == 'POST':
a = request.form['a']
b = request.form['b']
elif request.method == 'GET':
a = request.args.get('a')
b = request.args.get('b')
a = int(a)
b = int(b)
html = f"{a * b}"
return html
@app.route('/div', methods = ['POST', 'GET'])
def div(a=0, b=0):
"""divide a by b; returns result as the body"""
if request.method == 'POST':
a = request.form['a']
b = request.form['b']
elif request.method == 'GET':
a = request.args.get('a')
b = request.args.get('b')
a = int(a)
b = int(b)
html = f"{a / b}"
return html
@app.route('/math/<op>', methods = ['POST', 'GET'])
def math(op):
"""divide a by b; returns result as the body"""
if request.method == 'POST':
a = request.form['a']
b = request.form['b']
elif request.method == 'GET':
a = request.args.get('a')
b = request.args.get('b')
a = int(a)
b = int(b)
# NOTE: eval() executes whatever callable name appears in the URL; demo use only.
r = eval(op + f"({a},{b})")
html = f"{r}"
return html
```
- avg_line_length: 21.741935 | max_line_length: 58 | alphanum_fraction: 0.505935
- quality signals (schema order): 308 | 2,022 | 3.308442 | 0.136364 | 0.039254 | 0.088322 | 0.083415 | 0.757606 | 0.757606 | 0.757606 | 0.757606 | 0.757606 | 0.757606 | 0 | 0.005587 | 0.29179 | 2,022 | 92 | 59 | 21.978261 | 0.706006 | 0.109298 | 0 | 0.714286 | 0 | 0 | 0.088501 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079365 | false | 0 | 0.015873 | 0 | 0.174603 | 0
- raw signal values (schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- effective: 0 | hits: 6
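A side note on the Flask sample in Row 3: its `math` view passes the URL segment to `eval`, which will execute any expression an attacker puts in the path. An explicit dispatch table is the usual fix; this sketch mirrors the sample's operation names but is illustrative, not part of the dataset:

```python
import operator

# Whitelist of allowed operations; unknown names raise instead of executing code.
OPS = {"add": operator.add, "sub": operator.sub,
       "mult": operator.mul, "div": operator.truediv}

def apply_op(op: str, a: int, b: int) -> str:
    func = OPS.get(op)
    if func is None:
        raise ValueError(f"unknown operation: {op}")
    return f"{func(a, b)}"

print(apply_op("add", 2, 3))  # "5"
```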
Row 4
- hexsha: 6b5ac85ed72f00bfbd2708c78bfacbc994471bff | size: 265 | ext: py | lang: Python
- max_stars: soli/aria/views/__init__.py @ rcdixon/soli, head d29c77c1d391dfcc3c0dd0297ecf93fa9aa046ab, licenses ["MIT"], count null, events null to null
- max_issues: same path/repo/head/licenses, count null, events null to null
- max_forks: same path/repo/head/licenses, count null, events null to null
- content:

```python
from .index import index
from aria.views.create.crop import createCrop
from aria.views.create.plot import createPlot, createPlotAjax, deletePlotsAjax
from aria.views.create.species import createSpecies, createGenus
from aria.views.display.crops import displayCrops
```
- avg_line_length: 44.166667 | max_line_length: 78 | alphanum_fraction: 0.85283
- quality signals (schema order): 35 | 265 | 6.457143 | 0.514286 | 0.141593 | 0.230089 | 0.252212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086792 | 265 | 5 | 79 | 53 | 0.933884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
- raw signal values (schema order): 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
- effective: 0 | hits: 6
Row 5
- hexsha: 6b66a490a312f1667a8a53ad91c17cf69c48e1f8 | size: 33 | ext: py | lang: Python
- max_stars: tests/fixtures/__init__.py @ DimonLuk/django-boilerplate, head ce629e3d32d3a121932d21aa78e83c34e78a9bf0, licenses ["MIT"], count 9, events 2020-01-31T17:29:21.000Z to 2020-03-07T12:32:36.000Z
- max_issues: same path/repo/head/licenses, count 8, events 2020-02-20T16:53:45.000Z to 2020-06-03T07:26:57.000Z
- max_forks: same path/repo/head/licenses, count null, events null to null
- content:

```python
from .general import app # NOQA
```
- avg_line_length: 16.5 | max_line_length: 32 | alphanum_fraction: 0.727273
- quality signals (schema order): 5 | 33 | 4.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.212121 | 33 | 1 | 33 | 33 | 0.923077 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
- raw signal values (schema order): 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
- effective: 0 | hits: 6
Row 6
- hexsha: 6b9359a45e7b8da6ab66afdfc209556c3bace758 | size: 8,533 | ext: py | lang: Python
- max_stars: cottonformation/res/cloud9.py @ gitter-badger/cottonformation-project, head 354f1dce7ea106e209af2d5d818b6033a27c193c, licenses ["BSD-2-Clause"], count null, events null to null
- max_issues: same path/repo/head/licenses, count null, events null to null
- max_forks: same path/repo/head/licenses, count null, events null to null
- content:

```python
# -*- coding: utf-8 -*-
"""
This module declares the AWS Cloud9 (AWS::Cloud9::*) properties and resources.
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class EnvironmentEC2Repository(Property):
"""
AWS Object Type = "AWS::Cloud9::EnvironmentEC2.Repository"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloud9-environmentec2-repository.html
Property Document:
- ``rp_PathComponent``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloud9-environmentec2-repository.html#cfn-cloud9-environmentec2-repository-pathcomponent
- ``rp_RepositoryUrl``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloud9-environmentec2-repository.html#cfn-cloud9-environmentec2-repository-repositoryurl
"""
AWS_OBJECT_TYPE = "AWS::Cloud9::EnvironmentEC2.Repository"
rp_PathComponent: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "PathComponent"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloud9-environmentec2-repository.html#cfn-cloud9-environmentec2-repository-pathcomponent"""
rp_RepositoryUrl: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RepositoryUrl"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloud9-environmentec2-repository.html#cfn-cloud9-environmentec2-repository-repositoryurl"""
#--- Resource declaration ---
@attr.s
class EnvironmentEC2(Resource):
"""
AWS Object Type = "AWS::Cloud9::EnvironmentEC2"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html
Property Document:
- ``rp_InstanceType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-instancetype
- ``p_AutomaticStopTimeMinutes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-automaticstoptimeminutes
- ``p_ConnectionType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-connectiontype
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-description
- ``p_ImageId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-imageid
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-name
- ``p_OwnerArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-ownerarn
- ``p_Repositories``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-repositories
- ``p_SubnetId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-subnetid
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-tags
"""
AWS_OBJECT_TYPE = "AWS::Cloud9::EnvironmentEC2"
rp_InstanceType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "InstanceType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-instancetype"""
p_AutomaticStopTimeMinutes: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "AutomaticStopTimeMinutes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-automaticstoptimeminutes"""
p_ConnectionType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ConnectionType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-connectiontype"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-description"""
p_ImageId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ImageId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-imageid"""
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-name"""
p_OwnerArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "OwnerArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-ownerarn"""
p_Repositories: typing.List[typing.Union['EnvironmentEC2Repository', dict]] = attr.ib(
default=None,
converter=EnvironmentEC2Repository.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentEC2Repository), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Repositories"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-repositories"""
p_SubnetId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SubnetId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-subnetid"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#cfn-cloud9-environmentec2-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#aws-resource-cloud9-environmentec2-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloud9-environmentec2.html#aws-resource-cloud9-environmentec2-return-values"""
return GetAtt(resource=self, attr_name="Name")
```
- avg_line_length: 59.256944 | max_line_length: 200 | alphanum_fraction: 0.757998
- quality signals (schema order): 949 | 8,533 | 6.717597 | 0.084299 | 0.181961 | 0.048314 | 0.074667 | 0.896157 | 0.896157 | 0.875451 | 0.86102 | 0.86102 | 0.86102 | 0 | 0.016006 | 0.106762 | 8,533 | 143 | 201 | 59.671329 | 0.820388 | 0.326614 | 0 | 0.3125 | 0 | 0 | 0.05982 | 0.02991 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.05 | 0 | 0.3 | 0
- raw signal values (schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- effective: 0 | hits: 6
Row 7
- hexsha: 6bd1d7ade4329b168050502a59c89e79ea866a92 | size: 7,748 | ext: py | lang: Python
- max_stars: autolens/model/inversion/util/mapper_util.py @ AshKelly/PyAutoLens, head 043795966338a655339e61782253ad67cc3c14e6, licenses ["MIT"], count null, events null to null
- max_issues: same path/repo/head/licenses, count null, events null to null
- max_forks: same path/repo/head/licenses, count null, events null to null
- content:

```python
import numpy as np
from autolens import decorator_util
@decorator_util.jit()
def mapping_matrix_from_sub_to_pix(sub_to_pix, pixels, regular_pixels, sub_to_regular, sub_grid_fraction):
"""Computes the mapping matrix, by iterating over the known mappings between the sub-grid and pixelization.
Parameters
-----------
sub_to_pix : ndarray
The mappings between the observed regular's sub-pixels and pixelization's pixels.
pixels : int
The number of pixels in the pixelization.
regular_pixels : int
The number of data pixels in the observed data and thus on the regular grid.
sub_to_regular : ndarray
The mappings between the observed regular's sub-pixels and observed regular's pixels.
sub_grid_fraction : float
The fractional area each sub-pixel takes up in a regular-pixel.
"""
mapping_matrix = np.zeros((regular_pixels, pixels))
for sub_index in range(sub_to_regular.shape[0]):
mapping_matrix[sub_to_regular[sub_index], sub_to_pix[sub_index]] += sub_grid_fraction
return mapping_matrix
@decorator_util.jit()
def voronoi_regular_to_pix_from_grids_and_geometry(regular_grid, regular_to_nearest_pix, pixel_centres,
pixel_neighbors, pixel_neighbors_size):
""" Compute the mappings between a set of regular-grid pixels and pixelization pixels, using information on \
how regular pixels map to their closest pixelization pixel on the image-plane pix-grid and the pixelization's \
pixel centres.
To determine the complete set of regular-pixel to pixelization pixel mappings, we must pair every regular-pixel to \
its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \
the Voronoi grid) are used to localize each nearest neighbor search via a graph search.
Parameters
----------
regular_grid : RegularGrid
The grid of (y,x) arc-second coordinates at the centre of every unmasked pixel, which has been traced \
to an irregular grid via lens.
regular_to_nearest_pix : ndarray
A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel (as determined on the unlensed \
2D array).
pixel_centres : ndarray
The (y,x) centre of every Voronoi pixel in arc-seconds.
pixel_neighbors : ndarray
An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_size : ndarray
An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
Voronoi grid.
"""
regular_to_pix = np.zeros((regular_grid.shape[0]))
for regular_index in range(regular_grid.shape[0]):
nearest_pix_pixel_index = regular_to_nearest_pix[regular_index]
while True:
nearest_pix_pixel_center = pixel_centres[nearest_pix_pixel_index]
sub_to_nearest_pix_distance = (regular_grid[regular_index, 0] - nearest_pix_pixel_center[0]) ** 2 + \
(regular_grid[regular_index, 1] - nearest_pix_pixel_center[1]) ** 2
closest_separation_from_pix_neighbor = 1.0e8
for neighbor_index in range(pixel_neighbors_size[nearest_pix_pixel_index]):
neighbor = pixel_neighbors[nearest_pix_pixel_index, neighbor_index]
separation_from_neighbor = (regular_grid[regular_index, 0] - pixel_centres[neighbor, 0]) ** 2 + \
(regular_grid[regular_index, 1] - pixel_centres[neighbor, 1]) ** 2
if separation_from_neighbor < closest_separation_from_pix_neighbor:
closest_separation_from_pix_neighbor = separation_from_neighbor
closest_neighbor_index = neighbor_index
neighboring_pix_pixel_index = pixel_neighbors[nearest_pix_pixel_index, closest_neighbor_index]
sub_to_neighboring_pix_distance = closest_separation_from_pix_neighbor
if sub_to_nearest_pix_distance <= sub_to_neighboring_pix_distance:
regular_to_pix[regular_index] = nearest_pix_pixel_index
break
else:
nearest_pix_pixel_index = neighboring_pix_pixel_index
return regular_to_pix
@decorator_util.jit()
def voronoi_sub_to_pix_from_grids_and_geometry(sub_grid, regular_to_nearest_pix, sub_to_regular, pixel_centres,
pixel_neighbors, pixel_neighbors_size):
""" Compute the mappings between a set of sub-grid pixels and pixelization pixels, using information on \
how the regular pixels hosting each sub-pixel map to their closest pixelization pixel on the image-plane pix-grid \
and the pixelization's pixel centres.
To determine the complete set of sub-pixel to pixelization pixel mappings, we must pair every sub-pixel to \
its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \
the Voronoi grid) are used to localize each nearest neighbor search by using a graph search.
Parameters
----------
sub_grid : SubGrid
The grid of (y,x) arc-second coordinates at the centre of every unmasked sub-pixel, which has been traced \
to an irregular grid via lens.
regular_to_nearest_pix : ndarray
A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel (as determined on the unlensed \
2D array).
pixel_centres : (float, float)
The (y,x) centre of every Voronoi pixel in arc-seconds.
pixel_neighbors : ndarray
An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_size : ndarray
An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
Voronoi grid.
"""
sub_to_pix = np.zeros((sub_grid.shape[0]))
for sub_index in range(sub_grid.shape[0]):
nearest_pix_pixel_index = regular_to_nearest_pix[sub_to_regular[sub_index]]
while True:
nearest_pix_pixel_center = pixel_centres[nearest_pix_pixel_index]
sub_to_nearest_pix_distance = (sub_grid[sub_index, 0] - nearest_pix_pixel_center[0]) ** 2 + \
(sub_grid[sub_index, 1] - nearest_pix_pixel_center[1]) ** 2
closest_separation_from_pix_to_neighbor = 1.0e8
for neighbor_index in range(pixel_neighbors_size[nearest_pix_pixel_index]):
neighbor = pixel_neighbors[nearest_pix_pixel_index, neighbor_index]
separation_from_neighbor = (sub_grid[sub_index, 0] - pixel_centres[neighbor, 0]) ** 2 + \
(sub_grid[sub_index, 1] - pixel_centres[neighbor, 1]) ** 2
if separation_from_neighbor < closest_separation_from_pix_to_neighbor:
closest_separation_from_pix_to_neighbor = separation_from_neighbor
closest_neighbor_index = neighbor_index
neighboring_pix_pixel_index = pixel_neighbors[nearest_pix_pixel_index, closest_neighbor_index]
sub_to_neighboring_pix_distance = closest_separation_from_pix_to_neighbor
if sub_to_nearest_pix_distance <= sub_to_neighboring_pix_distance:
sub_to_pix[sub_index] = nearest_pix_pixel_index
break
else:
nearest_pix_pixel_index = neighboring_pix_pixel_index
return sub_to_pix
```
- avg_line_length: 48.425 | max_line_length: 120 | alphanum_fraction: 0.696825
- quality signals (schema order): 1,052 | 7,748 | 4.831749 | 0.129278 | 0.062955 | 0.061971 | 0.055086 | 0.838481 | 0.794806 | 0.769034 | 0.731458 | 0.720047 | 0.681094 | 0 | 0.007031 | 0.247419 | 7,748 | 160 | 121 | 48.425 | 0.864689 | 0.431466 | 0 | 0.416667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.033333 | 0 | 0.133333 | 0
- raw signal values (schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- effective: 0 | hits: 6
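To make Row 7's mapping_matrix_from_sub_to_pix concrete: with 4 sub-pixels, 2 regular pixels, 3 pixelization pixels, and sub_grid_fraction = 0.5, the loop accumulates fractional areas like this (toy values invented for illustration):

```python
import numpy as np

sub_to_pix = np.array([0, 2, 1, 2])      # pixelization pixel of each sub-pixel
sub_to_regular = np.array([0, 0, 1, 1])  # regular pixel hosting each sub-pixel

mapping_matrix = np.zeros((2, 3))        # shape (regular_pixels, pixels)
for sub_index in range(sub_to_regular.shape[0]):
    mapping_matrix[sub_to_regular[sub_index], sub_to_pix[sub_index]] += 0.5

print(mapping_matrix)
# [[0.5 0.  0.5]
#  [0.  0.5 0.5]]
```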
Row 8
- hexsha: 6bf775113f8745c071d87610893e8b61a508ce8f | size: 150 | ext: py | lang: Python
- max_stars: pymgl/tests/conftest.py @ brendan-ward/pymgl, head c88e652023601736b73bd60f5fb7df6359255f28, licenses ["MIT"], count 3, events 2022-03-01T21:38:38.000Z to 2022-03-03T02:10:07.000Z
- max_issues: same path/repo/head/licenses, count 1, events 2022-03-07T21:25:17.000Z to 2022-03-08T20:27:11.000Z
- max_forks: same path/repo/head/licenses, count null, events null to null
- content:

```python
import pytest
from .common import read_style
@pytest.fixture(scope="session")
def empty_style():
return read_style("example-style-empty.json")
```
- avg_line_length: 16.666667 | max_line_length: 49 | alphanum_fraction: 0.76
- quality signals (schema order): 21 | 150 | 5.285714 | 0.666667 | 0.162162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12 | 150 | 8 | 50 | 18.75 | 0.840909 | 0 | 0 | 0 | 0 | 0 | 0.206667 | 0.16 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.4 | 0.2 | 0.8 | 0
- raw signal values (schema order): 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0
- effective: 0 | hits: 6
Row 9
- hexsha: d40e7a60ff68e0ff2cf1ef106526416a64c5b152 | size: 126 | ext: py | lang: Python
- max_stars: keepachangelog/__init__.py @ Mulugruntz/keepachangelog, head 3538c5b4beb433293352d084f4df7bcce4d02bf1, licenses ["MIT"], count null, events null to null
- max_issues: same path/repo/head/licenses, count null, events null to null
- max_forks: same path/repo/head/licenses, count null, events null to null
- content:

```python
from keepachangelog.version import __version__
from keepachangelog._changelog import to_dict, to_raw_dict, release, from_dict
```
- avg_line_length: 42 | max_line_length: 78 | alphanum_fraction: 0.873016
- quality signals (schema order): 17 | 126 | 5.941176 | 0.529412 | 0.356436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.087302 | 126 | 2 | 79 | 63 | 0.878261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
- raw signal values (schema order): 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
- effective: 0 | hits: 6
Row 10
- hexsha: 2e0b8e695c44701040a1b322561549feb15f5d85 | size: 23,570 | ext: py | lang: Python
- max_stars: tests/structures/test_java.py @ cherub96/voc, head 2692d56059e4d4a52768270feaf5179b23609b04, licenses ["BSD-3-Clause"], count null, events null to null
- max_issues: same path/repo/head/licenses, count null, events null to null
- max_forks: same path/repo/head/licenses, count null, events null to null
- content:

```python
from unittest import expectedFailure
from ..utils import TranspileTestCase
class JavaTests(TranspileTestCase):
def test_multiple_constructors(self):
"The appropriate constructor for a native Java class can be interpolated from args"
self.assertJavaExecution(
"""
from java.lang import StringBuilder
builder = StringBuilder("Hello, ")
builder.append("world")
print(builder)
print("Done.")
""",
"""
Hello, world
Done.
""", run_in_function=False)
def test_most_specific_constructor(self):
"The most specific constructor for a native Java class will be selected based on argument."
self.assertJavaExecution(
"""
from com.example import MyClass
obj1 = MyClass()
obj2 = MyClass(1.234)
obj3 = MyClass(3742)
print("Done.")
""",
java={
'com/example/MyClass': """
package com.example;
public class MyClass {
public MyClass() {
System.out.println("No argument");
}
public MyClass(int arg) {
System.out.println("Integer argument " + arg);
}
public MyClass(double arg) {
System.out.println("Double argument " + arg);
}
}
"""
},
out="""
No argument
Double argument 1.234
Integer argument 3742
Done.
""", run_in_function=False)
def test_field(self):
"Native fields on an instance can be accessed"
self.assertJavaExecution("""
from com.example import MyClass
print("Class is", MyClass)
obj1 = MyClass()
print("Field is", MyClass.field)
print("Field from instance is", obj1.field)
obj1.field = 37
print("Updated Field from instance is", obj1.field)
print("Done.")
""",
java={
'com/example/MyClass': """
package com.example;
public class MyClass {
public int field = 42;
}
"""
},
out="""
Class is <class 'com.example.MyClass'>
Field is <unbound native field public int com.example.MyClass.field>
Field from instance is 42
Updated Field from instance is 37
Done.
""")
def test_static_field(self):
"Class constants can be accessed"
self.assertJavaExecution("""
from com.example import MyClass
print("Class is", MyClass)
obj1 = MyClass()
print("Static field is", MyClass.static_field)
MyClass.static_field = 37
print("Updated static field is", MyClass.static_field)
print("Static field from instance is", obj1.static_field)
MyClass.static_field = 42
print("Updated static field from instance is", obj1.static_field)
print("Done.")
""",
java={
'com/example/MyClass': """
package com.example;
public class MyClass {
public static int static_field = 42;
}
"""
},
out="""
Class is <class 'com.example.MyClass'>
Static field is 42
Updated static field is 37
Static field from instance is 37
Updated static field from instance is 42
Done.
""")
def test_superclass_field(self):
"Native fields defined on a superclass can be accessed"
self.assertJavaExecution("""
from com.example import MyBase, MyClass
print("Base class is", MyBase)
print("Class is", MyClass)
obj1 = MyClass()
print("Base field on superclass is", MyBase.base_field)
print("Base field is", MyClass.base_field)
print("Base field from instance is", obj1.base_field)
print("Field is", MyClass.field)
print("Field from instance is", obj1.field)
print("Done.")
""",
java={
'com/example/MyBase': """
package com.example;
public class MyBase {
public int base_field = 37;
}
""",
'com/example/MyClass': """
package com.example;
public class MyClass extends MyBase {
public int field = 42;
}
"""
},
out="""
Base class is <class 'com.example.MyBase'>
Class is <class 'com.example.MyClass'>
Base field on superclass is <unbound native field public int com.example.MyBase.base_field>
Base field is <unbound native field public int com.example.MyBase.base_field>
Base field from instance is 37
Field is <unbound native field public int com.example.MyClass.field>
Field from instance is 42
Done.
""")
def test_superclass_static_field(self):
"Native static fields defined on a superclass can be accessed"
self.assertJavaExecution("""
from com.example import MyBase, MyClass
print("Base class is", MyBase)
print("Class is", MyClass)
obj1 = MyClass()
print("Static base field on superclass is", MyBase.base_static_field)
print("Static base field is", MyClass.base_static_field)
print("Static base field from instance is", obj1.base_static_field)
print("Static field is", MyClass.static_field)
print("Static field from instance is", obj1.static_field)
print("Done.")
""",
java={
'com/example/MyBase': """
package com.example;
public class MyBase {
public static int base_static_field = 37;
}
""",
'com/example/MyClass': """
package com.example;
public class MyClass extends MyBase {
public static int static_field = 42;
}
"""
},
out="""
Base class is <class 'com.example.MyBase'>
Class is <class 'com.example.MyClass'>
Static base field on superclass is 37
Static base field is 37
Static base field from instance is 37
Static field is 42
Static field from instance is 42
Done.
""")
def test_constant(self):
"Instance constants can be accessed"
self.assertJavaExecution("""
from com.example import MyClass
print("Class is", MyClass)
obj1 = MyClass()
print("Constant is", MyClass.CONSTANT)
print("Constant from instance is", obj1.CONSTANT)
print("Done.")
""",
java={
'com/example/MyClass': """
package com.example;
public class MyClass {
public final int CONSTANT = 42;
}
"""
},
out="""
Class is <class 'com.example.MyClass'>
Constant is <unbound native field public final int com.example.MyClass.CONSTANT>
Constant from instance is 42
Done.
""")
def test_static_constant(self):
"Class constants can be accessed"
self.assertJavaExecution("""
from com.example import MyClass
print("Class is", MyClass)
obj1 = MyClass()
print("Static constant is", MyClass.STATIC_CONSTANT)
print("Static constant from instance is", obj1.STATIC_CONSTANT)
print("Done.")
""",
java={
'com/example/MyClass': """
package com.example;
public class MyClass {
public static final int STATIC_CONSTANT = 42;
}
"""
},
out="""
Class is <class 'com.example.MyClass'>
Static constant is 42
Static constant from instance is 42
Done.
""")
def test_method(self):
"Native methods on an instance can be accessed"
self.assertJavaExecution("""
from com.example import MyClass
print("Class is", MyClass)
obj = MyClass()
print("Method is", MyClass.method)
print("Method from instance is", obj.method)
obj.method()
print("Done.")
""",
java={
'com/example/MyClass': """
package com.example;
public class MyClass {
public void method() {
System.out.println("Hello from the instance!");
}
}
"""
},
out="""
Class is <class 'com.example.MyClass'>
Method is <native function com.example.MyClass.method>
Method from instance is <bound native method com.example.MyClass.method of <Native class com.example.MyClass object at 0xXXXXXXXX>>
Hello from the instance!
Done.
""")
def test_static_method(self):
"Native static methods on an instance can be accessed"
self.assertJavaExecution("""
from com.example import MyClass
print("Class is", MyClass)
obj = MyClass()
print("Static method is", MyClass.method)
MyClass.method()
print("Static method from instance is", obj.method)
obj.method()
print("Done.")
""",
java={
'com/example/MyClass': """
package com.example;
public class MyClass {
public static void method() {
System.out.println("Hello from the class!");
}
}
"""
},
out="""
Class is <class 'com.example.MyClass'>
Static method is <native function com.example.MyClass.method>
Hello from the class!
Static method from instance is <bound native method com.example.MyClass.method of <Native class com.example.MyClass object at 0xXXXXXXXX>>
Hello from the class!
Done.
""")
def test_superclass_method(self):
"Native methods defined on a superclass can be accessed"
self.assertJavaExecution("""
from com.example import MyBase, MyClass
print("Base class is", MyBase)
print("Class is", MyClass)
print("Base method on superclass is", MyBase.base_method)
print("Method on superclass is", MyBase.method)
print("Base method is", MyClass.base_method)
print("Method is", MyClass.method)
obj1 = MyBase()
print("Base method from superclass instance is", obj1.base_method)
obj1.base_method()
print("Method from superclass instance is", obj1.method)
obj1.method()
obj2 = MyClass()
print("Base method from instance is", obj2.base_method)
obj2.base_method()
print("Method from instance is", obj2.method)
obj2.method()
print("Done.")
""",
java={
'com/example/MyBase': """
package com.example;
public class MyBase {
public void base_method() {
System.out.println("Hello from the base!");
}
public void method() {
System.out.println("Goodbye from the base!");
}
}
""",
'com/example/MyClass': """
package com.example;
public class MyClass extends MyBase {
public void method() {
System.out.println("Hello from the instance!");
}
}
"""
},
out="""
Base class is <class 'com.example.MyBase'>
Class is <class 'com.example.MyClass'>
Base method on superclass is <native function com.example.MyBase.base_method>
Method on superclass is <native function com.example.MyBase.method>
Base method is <native function com.example.MyBase.base_method>
Method is <native function com.example.MyClass.method>
Base method from superclass instance is <bound native method com.example.MyBase.base_method of <Native class com.example.MyBase object at 0xXXXXXXXX>>
Hello from the base!
Method from superclass instance is <bound native method com.example.MyBase.method of <Native class com.example.MyBase object at 0xXXXXXXXX>>
Goodbye from the base!
Base method from instance is <bound native method com.example.MyBase.base_method of <Native class com.example.MyClass object at 0xXXXXXXXX>>
Hello from the base!
Method from instance is <bound native method com.example.MyClass.method of <Native class com.example.MyClass object at 0xXXXXXXXX>>
Hello from the instance!
Done.
""")
def test_superclass_static_method(self):
"Native static methods defined on a superclass can be accessed"
self.assertJavaExecution("""
from com.example import MyBase, MyClass
print("Base class is", MyBase)
print("Class is", MyClass)
print("Static base method on superclass is", MyBase.base_static_method)
MyBase.base_static_method()
print("Static method on superclass is", MyBase.static_method)
MyBase.static_method()
print("Static base method is", MyClass.base_static_method)
MyClass.base_static_method()
print("Static method is", MyClass.static_method)
MyClass.static_method()
obj1 = MyBase()
print("Base static method from superclass instance is", obj1.base_static_method)
obj1.base_static_method()
print("Static method from superclass instance is", obj1.static_method)
obj1.static_method()
obj2 = MyClass()
print("Base static method from instance is", obj2.base_static_method)
obj2.base_static_method()
print("Static method from instance is", obj2.static_method)
obj2.static_method()
print("Done.")
""",
java={
'com/example/MyBase': """
package com.example;
public class MyBase {
public static void base_static_method() {
System.out.println("Hello from the base!");
}
public static void static_method() {
System.out.println("Goodbye from the base!");
}
}
""",
'com/example/MyClass': """
package com.example;
public class MyClass extends MyBase {
public static void static_method() {
System.out.println("Hello from the class!");
}
}
"""
},
out="""
Base class is <class 'com.example.MyBase'>
Class is <class 'com.example.MyClass'>
Static base method on superclass is <native function com.example.MyBase.base_static_method>
Hello from the base!
Static method on superclass is <native function com.example.MyBase.static_method>
Goodbye from the base!
Static base method is <native function com.example.MyBase.base_static_method>
Hello from the base!
Static method is <native function com.example.MyClass.static_method>
Hello from the class!
Base static method from superclass instance is <bound native method com.example.MyBase.base_static_method of <Native class com.example.MyBase object at 0xXXXXXXXX>>
Hello from the base!
Static method from superclass instance is <bound native method com.example.MyBase.static_method of <Native class com.example.MyBase object at 0xXXXXXXXX>>
Goodbye from the base!
Base static method from instance is <bound native method com.example.MyBase.base_static_method of <Native class com.example.MyClass object at 0xXXXXXXXX>>
Hello from the base!
Static method from instance is <bound native method com.example.MyClass.static_method of <Native class com.example.MyClass object at 0xXXXXXXXX>>
Hello from the class!
Done.
""")
def test_inner_class_constant(self):
"Constants on an inner class can be accessed"
self.assertJavaExecution("""
from com.example import OuterClass
print("Outer class is", OuterClass)
print("Outer constant is", OuterClass.OUTER_CONSTANT)
print("Inner class is", OuterClass.InnerClass)
print("Inner constant is", OuterClass.InnerClass.INNER_CONSTANT)
print("Done.")
""",
java={
'com/example/OuterClass': """
package com.example;
public class OuterClass {
public static final int OUTER_CONSTANT = 42;
public static class InnerClass {
public static final int INNER_CONSTANT = 37;
}
}
"""
},
out="""
Outer class is <class 'com.example.OuterClass'>
Outer constant is 42
Inner class is <class 'com.example.OuterClass$InnerClass'>
Inner constant is 37
Done.
""")
def test_inner_class_method(self):
"Inner classes can be instantiated, and methods invoked"
self.assertJavaExecution("""
from com.example import OuterClass
print("Outer class is", OuterClass)
obj1 = OuterClass()
obj1.method()
print("Inner class is", OuterClass.InnerClass)
obj2 = OuterClass.InnerClass(obj1)
obj2.method()
print("Done.")
""",
java={
'com/example/OuterClass': """
package com.example;
public class OuterClass {
public class InnerClass {
public void method() {
System.out.println("Hello from the inside!");
}
}
public void method() {
System.out.println("Hello from the outside!");
}
}
"""
},
out="""
Outer class is <class 'com.example.OuterClass'>
Hello from the outside!
Inner class is <class 'com.example.OuterClass$InnerClass'>
Hello from the inside!
Done.
""")
def test_static_inner_class_constant(self):
"Constants on a static inner class can be accessed"
self.assertJavaExecution("""
from com.example import OuterClass
print("Outer class is", OuterClass)
print("Outer constant is", OuterClass.OUTER_CONSTANT)
print("Inner class is", OuterClass.InnerClass)
print("Inner constant is", OuterClass.InnerClass.INNER_CONSTANT)
print("Done.")
""",
java={
'com/example/OuterClass': """
package com.example;
public class OuterClass {
public static final int OUTER_CONSTANT = 42;
public static class InnerClass {
public static final int INNER_CONSTANT = 37;
}
}
"""
},
out="""
Outer class is <class 'com.example.OuterClass'>
Outer constant is 42
Inner class is <class 'com.example.OuterClass$InnerClass'>
Inner constant is 37
Done.
""")
def test_static_inner_class_method(self):
"Static inner classes can be instantiated, and methods invoked"
self.assertJavaExecution("""
from com.example import OuterClass
print("Outer class is", OuterClass)
obj1 = OuterClass()
obj1.method()
print("Inner class is", OuterClass.InnerClass)
obj2 = OuterClass.InnerClass()
obj2.method()
print("Done.")
""",
java={
'com/example/OuterClass': """
package com.example;
public class OuterClass {
public static class InnerClass {
public void method() {
System.out.println("Hello from the inside!");
}
}
public void method() {
System.out.println("Hello from the outside!");
}
}
"""
},
out="""
Outer class is <class 'com.example.OuterClass'>
Hello from the outside!
Inner class is <class 'com.example.OuterClass$InnerClass'>
Hello from the inside!
Done.
""")
```
- avg_line_length: 37.833066 | max_line_length: 180 | alphanum_fraction: 0.492193
- quality signals (schema order): 2,169 | 23,570 | 5.289534 | 0.047026 | 0.095877 | 0.056306 | 0.028763 | 0.885557 | 0.82838 | 0.780964 | 0.724048 | 0.697115 | 0.643249 | 0 | 0.010753 | 0.427917 | 23,570 | 622 | 181 | 37.893891 | 0.840107 | 0.03636 | 0 | 0.647619 | 0 | 0.019048 | 0.868689 | 0.108597 | 0 | 0 | 0 | 0 | 0.030476 | 1 | 0.030476 | false | 0 | 0.030476 | 0 | 0.062857 | 0.182857
- raw signal values (schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- effective: 0 | hits: 6
Row 11
- hexsha: 2e458d4962853fad9989501b67826e3c9c8360aa | size: 170 | ext: py | lang: Python
- max_stars: tracker/ticket/views.py @ okin006/tracker, head 82fc7b658f2e5be9e7447881e893cfb1186fe2b1, licenses ["MIT"], count null, events null to null
- max_issues: same path/repo/head/licenses, count null, events null to null
- max_forks: same path/repo/head/licenses, count null, events null to null
- content:

```python
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request):
return HttpResponse("Bonjour tout le monde !")
```
- avg_line_length: 24.285714 | max_line_length: 50 | alphanum_fraction: 0.776471
- quality signals (schema order): 23 | 170 | 5.73913 | 0.826087 | 0.151515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152941 | 170 | 6 | 51 | 28.333333 | 0.916667 | 0.135294 | 0 | 0 | 0 | 0 | 0.158621 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0.25 | 1 | 0
- raw signal values (schema order): 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0
- effective: 0 | hits: 6
Row 12
- hexsha: 2e4713f0833f32c9b3c0c22cdf397f03e0bbcfe7 | size: 132 | ext: py | lang: Python
- max_stars: meta-cube/recipes-support/overc-system-agent/files/overc-system-agent-1.2/Overc/__init__.py @ sajal-wr/meta-overc, head 6e65720884144d320960b8fe737b1e20bf53f555, licenses ["MIT"], count 14, events 2016-03-23T20:44:11.000Z to 2022-02-03T13:58:49.000Z
- max_issues: same path/repo/head/licenses, count 242, events 2016-03-24T18:06:33.000Z to 2021-10-20T19:29:30.000Z
- max_forks: same path/repo/head/licenses, count 32, events 2016-02-19T20:54:01.000Z to 2021-03-26T04:24:38.000Z
- content:

```python
from Overc.overc import Overc
from Overc.utils import Utils
from Overc.package import Package
from Overc.container import Container
```
- avg_line_length: 26.4 | max_line_length: 37 | alphanum_fraction: 0.848485
- quality signals (schema order): 20 | 132 | 5.6 | 0.3 | 0.321429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 132 | 4 | 38 | 33 | 0.965517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
- raw signal values (schema order): 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
- effective: 0 | hits: 6
Row 13
- hexsha: 2e654e2871524b40a1baf0a7ffeaec3f4b6323d6 | size: 2,535 | ext: py | lang: Python
- max_stars: place2planet/place2geojson.py @ AndreaChlebikova/hacktober2018, head edfcab5c6d6f11e269643c9180ffc0108b15e1e0, licenses ["Apache-2.0"], count 2, events 2018-10-17T13:52:42.000Z to 2020-05-06T22:56:13.000Z
- max_issues: same path/repo/head/licenses, count null, events null to null
- max_forks: same path/repo/head/licenses, count 1, events 2018-10-25T16:00:08.000Z to 2018-10-25T16:00:08.000Z
- content:

```python
import json
import requests
import shapely
import shapely.geometry as geom
from shapely.geometry import Point, box, Polygon, MultiPoint
def search(place,local):
if ',' in place:
place = place.split(',')  # keep the parts; the join below strips the commas
real = ''.join(place)
r=requests.get('https://nominatim.openstreetmap.org/search?q='+real+'&format=jsonv2')
response=r.json()
for things in response:
try:
if len(response)>1 and things['importance']>=0.7:
lat=things['lat']
lon=things['lon']
center=Point(float(lon),float(lat)).buffer(0.11)
poly=center.simplify(4)
features = json.dumps(shapely.geometry.mapping(poly))
with open(local, 'w') as outfile:
outfile.write(features)
print('GeoJSON Exported to: '+str(local))
else:
lat=things['lat']
lon=things['lon']
center=Point(float(lon),float(lat)).buffer(0.11)
poly=center.simplify(4)
features = json.dumps(shapely.geometry.mapping(poly))
with open(local, 'w') as outfile:
outfile.write(features)
print('GeoJSON Exported to: '+str(local))
except Exception as e:
print(e)
else:
r=requests.get('https://nominatim.openstreetmap.org/search?q='+place+'&format=jsonv2')
response=r.json()
for things in response:
if len(response)>1 and things['importance']>=0.7:
lat=things['lat']
lon=things['lon']
center=Point(float(lon),float(lat)).buffer(0.11)
poly=center.simplify(4)
features = json.dumps(shapely.geometry.mapping(poly))
with open(local, 'w') as outfile:
outfile.write(features)
print('GeoJSON Exported to: '+str(local))
elif things['importance']>=0.7:
lat=things['lat']
lon=things['lon']
center=Point(float(lon),float(lat)).buffer(0.11)
poly=center.simplify(4)
features = json.dumps(shapely.geometry.mapping(poly))
with open(local, 'w') as outfile:
outfile.write(features)
print('GeoJSON Exported to: '+str(local))
#search(place="Bangalore,India",local=r"C:\planet_demo\bangalore.geojson")
| 43.706897
| 94
| 0.523077
| 276
| 2,535
| 4.800725
| 0.26087
| 0.067925
| 0.036226
| 0.045283
| 0.795472
| 0.795472
| 0.795472
| 0.795472
| 0.795472
| 0.655094
| 0
| 0.015682
| 0.345957
| 2,535
| 57
| 95
| 44.473684
| 0.783474
| 0.028797
| 0
| 0.727273
| 0
| 0
| 0.106461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0.145455
| 0
| 0.163636
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2e79e11e6dcb606f01d7111ada80d56f25925607
| 935
|
py
|
Python
|
2.4.py
|
ChichaSasha/A-SD_DZ
|
536fa3f16cfcab1256b569140978227a06bd42e4
|
[
"Apache-2.0"
] | 2
|
2020-11-05T14:18:16.000Z
|
2020-11-16T18:32:12.000Z
|
2.4.py
|
ChichaSasha/A-SD_DZ
|
536fa3f16cfcab1256b569140978227a06bd42e4
|
[
"Apache-2.0"
] | null | null | null |
2.4.py
|
ChichaSasha/A-SD_DZ
|
536fa3f16cfcab1256b569140978227a06bd42e4
|
[
"Apache-2.0"
] | null | null | null |
def n_to_m(n):
n_2 = ""
# to base 2
while n > 0:
n_2 += str(n % 2)
n = n // 2
max_num = "0"
for i in range(len(n_2)):
n_2 = n_2[1:] + n_2[0]
if n_2 > max_num:
max_num = n_2
# to base 10
step = 1
ans_num = 0
for i in range(len(n_2) - 1, -1, -1):
if max_num[i] == '1':
ans_num += step
step *= 2
return ans_num
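# other() below applies the same idea with a list of digits instead of a string:
# build the binary expansion of n, take the maximum over its cyclic rotations,
# and convert the winning rotation back to base 10.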
def other(n):
n_2 = []
# to base 2
while n > 0:
n_2.append(n % 2)
n = n // 2
max_num = n_2
for i in range(len(n_2)):
n_2 = n_2[1:] + [n_2[0]]
if n_2 > max_num:
print(n_2)
max_num = n_2
# to base 10
    step = 1  # weight of the current binary digit
ans_num = 0
for i in range(len(n_2) - 1, -1, -1):
if max_num[i] == 1:
ans_num += step
step *= 2
return ans_num
n = int(input())
print(other(n))
print(n_to_m(n), '\n', other(n))
| 17.980769
| 41
| 0.432086
| 173
| 935
| 2.115607
| 0.156069
| 0.131148
| 0.04918
| 0.10929
| 0.844262
| 0.81694
| 0.806011
| 0.754098
| 0.644809
| 0.63388
| 0
| 0.092421
| 0.42139
| 935
| 51
| 42
| 18.333333
| 0.584104
| 0.04385
| 0
| 0.552632
| 0
| 0
| 0.004505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.078947
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5ce89784fe658efb4afcac5418c00424f42aee60
| 3,058
|
py
|
Python
|
plotGMM.py
|
BatyaGG/Time-Series-Gaussian-Mixture-Regression_using_only_numpy_and_scipy
|
c2f6c7d10e7dba6754541a3e6be6bcc77d78d65c
|
[
"MIT"
] | 41
|
2017-09-14T19:09:03.000Z
|
2022-01-10T14:51:12.000Z
|
plotGMM.py
|
BatyaGG/Time-Series-Gaussian-Mixture-Regression_using_only_numpy_and_scipy
|
c2f6c7d10e7dba6754541a3e6be6bcc77d78d65c
|
[
"MIT"
] | null | null | null |
plotGMM.py
|
BatyaGG/Time-Series-Gaussian-Mixture-Regression_using_only_numpy_and_scipy
|
c2f6c7d10e7dba6754541a3e6be6bcc77d78d65c
|
[
"MIT"
] | 16
|
2018-12-05T11:01:13.000Z
|
2021-04-07T12:31:13.000Z
|
import numpy as np
from matplotlib.path import Path
import matplotlib.patches as patches
import scipy.linalg as lin
import matplotlib.pyplot as plt
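# plotGMM draws each Gaussian component as a covariance ellipse: points on the
# unit circle [cos t, sin t] are mapped through the matrix square root of
# 3*Sigma (roughly the sqrt(3)-sigma contour) and shifted by the mean Mu[:, j].
# display_mode 1 draws outlined ellipses with the means marked by crosses;
# display_mode 2 draws filled ellipses with the mean trajectory as a line.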
def plotGMM(Mu, Sigma, color,display_mode, ax):
a, nbData = np.shape(Mu)
lightcolor = np.asarray(color) + np.asarray([0.6,0.6,0.6])
a = np.nonzero(lightcolor > 1)
lightcolor[a] = 1
minsx = []
maxsx = []
minsy = []
maxsy = []
if display_mode==1:
nbDrawingSeg = 40
t = np.linspace(-np.pi,np.pi,nbDrawingSeg)
t = np.transpose(t)
for j in range (0,nbData):
stdev = lin.sqrtm(3*Sigma[:,:,j])
X = np.dot(np.transpose([np.cos(t), np.sin(t)]), np.real(stdev))
X = X + np.tile(np.transpose(Mu[:,j]), (nbDrawingSeg,1))
minsx.append(min(X[:,0]))
maxsx.append(max(X[:,0]))
minsy.append(min(X[:,1]))
maxsy.append(max(X[:,1]))
verts = []
codes = []
for i in range (0, nbDrawingSeg+1):
if i==0:
vert = (X[0,0], X[0,1])
code = Path.MOVETO
elif i!=nbDrawingSeg:
vert = (X[i,0], X[i,1])
code = Path.CURVE3
else:
vert = (X[0,0], X[0,1])
code = Path.CURVE3
verts.append(vert)
codes.append(code)
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor=lightcolor, edgecolor=color, lw=2)
ax.add_patch(patch)
ax.plot(Mu[0,:], Mu[1,:], "x",color = color)
# ax.set_xlim(min(minsx),max(maxsx))
# ax.set_ylim(min(minsy),max(maxsy))
elif display_mode == 2:
nbDrawingSeg = 40
t = np.linspace(-np.pi, np.pi, nbDrawingSeg)
t = np.transpose(t)
for j in range(0, nbData):
stdev = lin.sqrtm(3 * Sigma[:, :, j])
X = np.dot(np.transpose([np.cos(t), np.sin(t)]), np.real(stdev))
X = X + np.tile(np.transpose(Mu[:, j]), (nbDrawingSeg, 1))
minsx.append(min(X[:, 0]))
maxsx.append(max(X[:, 0]))
minsy.append(min(X[:, 1]))
maxsy.append(max(X[:, 1]))
verts = []
codes = []
for i in range(0, nbDrawingSeg+1):
if i == 0:
vert = (X[0, 0], X[0, 1])
code = Path.MOVETO
elif i != nbDrawingSeg:
vert = (X[i, 0], X[i, 1])
code = Path.CURVE3
else:
vert = (X[0, 0], X[0, 1])
code = Path.CURVE3
verts.append(vert)
codes.append(code)
path = Path(verts, codes)
patch = patches.PathPatch(path, linestyle=None, color = lightcolor)
ax.add_patch(patch)
ax.plot(Mu[0, :], Mu[1, :], "-",lw = 3, color=color)
# ax.set_xlim(min(minsx), max(maxsx))
# ax.set_ylim(min(minsy), max(maxsy))
| 35.55814
| 88
| 0.466972
| 394
| 3,058
| 3.601523
| 0.200508
| 0.016913
| 0.038055
| 0.019732
| 0.742777
| 0.742777
| 0.742777
| 0.742777
| 0.742777
| 0.742777
| 0
| 0.033473
| 0.374755
| 3,058
| 86
| 89
| 35.55814
| 0.708682
| 0.046109
| 0
| 0.72973
| 0
| 0
| 0.000687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013514
| false
| 0
| 0.067568
| 0
| 0.081081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cf3b7bb1d1b78311acff99e85f1adf3fc9c281d5
| 22
|
py
|
Python
|
swilite/__init__.py
|
EdTsft/swilite
|
b53e6167d599314b751887563fee4274335374ff
|
[
"MIT"
] | null | null | null |
swilite/__init__.py
|
EdTsft/swilite
|
b53e6167d599314b751887563fee4274335374ff
|
[
"MIT"
] | null | null | null |
swilite/__init__.py
|
EdTsft/swilite
|
b53e6167d599314b751887563fee4274335374ff
|
[
"MIT"
] | null | null | null |
from .prolog import *
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cf461f774843015f42cd085c7d58792c3688a1a1
| 64
|
py
|
Python
|
callback.py
|
Roger-Heathcote/py3-http-server-skeleton
|
b776b2866ad30038511619ceae99436dd4a582df
|
[
"MIT"
] | null | null | null |
callback.py
|
Roger-Heathcote/py3-http-server-skeleton
|
b776b2866ad30038511619ceae99436dd4a582df
|
[
"MIT"
] | null | null | null |
callback.py
|
Roger-Heathcote/py3-http-server-skeleton
|
b776b2866ad30038511619ceae99436dd4a582df
|
[
"MIT"
] | null | null | null |
def callback(*req):
print(req)
    print(getattr(req, 'data', ''))  # req is a plain tuple, so this falls back to '' unless a 'data' attribute exists
| 21.333333
| 32
| 0.640625
| 9
| 64
| 4.555556
| 0.666667
| 0.390244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 64
| 3
| 32
| 21.333333
| 0.719298
| 0
| 0
| 0
| 0
| 0
| 0.061538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
cf474df45d57ef28fe7cb364155caf6e1aaab00b
| 182
|
py
|
Python
|
items/base_item.py
|
voidrank/alchemy
|
854c9b867d21514b43dcd97e1b223a75435c3d81
|
[
"MIT"
] | 5
|
2016-12-26T07:56:14.000Z
|
2017-02-04T05:11:22.000Z
|
items/base_item.py
|
voidrank/alchemy
|
854c9b867d21514b43dcd97e1b223a75435c3d81
|
[
"MIT"
] | null | null | null |
items/base_item.py
|
voidrank/alchemy
|
854c9b867d21514b43dcd97e1b223a75435c3d81
|
[
"MIT"
] | 1
|
2018-08-05T14:51:06.000Z
|
2018-08-05T14:51:06.000Z
|
import numpy as np
class Field(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
class BaseItem(object):
def __init__(self, **kwargs):
pass
| 12.133333
| 33
| 0.620879
| 22
| 182
| 4.727273
| 0.590909
| 0.288462
| 0.25
| 0.326923
| 0.442308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 182
| 14
| 34
| 13
| 0.781955
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0.142857
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
cf4c0cbaafaad67f4f6c0ee209cdb7ef43acfd24
| 22,157
|
py
|
Python
|
Q5293.py
|
Linchin/python_leetcode_git
|
3d08ab04bbdbd2ce268f33c501fbb149662872c7
|
[
"MIT"
] | null | null | null |
Q5293.py
|
Linchin/python_leetcode_git
|
3d08ab04bbdbd2ce268f33c501fbb149662872c7
|
[
"MIT"
] | null | null | null |
Q5293.py
|
Linchin/python_leetcode_git
|
3d08ab04bbdbd2ce268f33c501fbb149662872c7
|
[
"MIT"
] | null | null | null |
"""
contest 12/21/2019
"""
class Solution:
def maxFreq(self, s: str, maxLetters: int, minSize: int, maxSize: int) -> int:
freq = {}
count_dict = {}
def count_letters(word):
if word in count_dict:
return count_dict[word]
unq = {}
for item in word:
if item in unq:
continue
else:
unq[item] = True
if len(unq) > maxLetters:
count_dict[word] = False
return False
count_dict[word] = True
return True
size = minSize
for i in range(0, len(s)-size+1):
current = s[i:i+size]
if count_letters(current):
if current in freq:
freq[current] += 1
else:
freq[current] = 1
if len(freq) == 0:
return 0
return freq[max(freq, key=freq.get)]
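# Note: counting windows of length exactly minSize suffices, because any longer
# valid substring contains a minSize-length substring that is also valid and
# occurs at least as often, so maxSize never affects the answer. The bare
# string/number literals below are inert leftover test inputs, evaluated and
# discarded at module load.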
"ffhrimojtdwnwrwsmwxxprahdofmwzzcziskfyxvlteunhyjvmexcbxlrxtcsozrxyaxppdztpzqfcnpiwzhcvyyvpnlwwkhjlctlsbboosvyabdglhzvwdtazcyrumynkhqywrmyljhkxbpnwmfkxnqpchyjckwwpiqjljynsidcccffguyqmvnubgznsjzgkublxwvdjequsguchpzcfncervajafyhyjvoqetaxkybvqgbglmcoxxapmymxmmsqpddpctymxkkztnpiqcgrsybfrqzepnteiuzkvfnnfwsjwrshjclvkvjiwfqbvprbknvxotekxskjofozxiisnomismymubikpagnvgrchynsyjmwadhqzbfssktjmdkbztodwidpwbihfguxzgrjsuksfjuxfqvmqqojoyjznvoktfbwykwhaxorlduchkefnbpgknyoodaizarigbozvsikhxhokfpedydzxlcbasrxnenxrqxgkyfncgnhmbtxnigznqaawmslxehbshmelgfxaayttbsbhvrpsehituihvleityqckpfpmcjffhhgxdprsylnjvrezjdwjrzgqbdwdctfnvibhgcpmudxnoedfgejnbctrcxcvresawrgpvmgptwnwudqfdpqiokqbujzkalfwddfpeptqhewwrlrwdabafodecuxtoxgcsbezhkoceyydjkniryftqdoveipatvfrfkhdztibywbajknxvkrcvfhgbnjxnoefgdwbekrvaalzuwypkhwhmxtnmoggsogczhemzysagznnmjiiwwyekibytqhgwfzmsqlntvakyhaaxiqvlxbhgknvdxjwecccsquqqqmrxsysfyidsbtqytgonmzyjcydyqqqmixrbrllbcbbnwvriqcrznobzippyssjypvjemvadgdcriydntlpyrmxejtdyzhzdljhbyifxewdyokbhcgkhpdjoeqexfxqwvxys"
18
2
22
"fehjblcdljlmckggcigkedfjcejklicihegfhkfbgegjiikcjgfacicaiheibcicmbilbkhhejfdifdehbjelcalcjellkaimhelkjhafcmjhikbgihjlmjclibceecelkaccklbdaifgdflidhidagiahlbjcfbijgeldjgedldbdchkblbdmcdjbjhccikelcmjjbfkhlfekdhbcakgbclgeijbdhmcmemebkgjeeeickifjglmjfjcmjidjgjmijceiikhmmaagebhifhkfhemfeigdlijffcjgmdehjgllkaallheikhghceekhcckfegghdcalalhkhlgikaamladheakecccgafkimibhiafkkkdbflklbhdagdefdgjfihbiakmjbdlhmlhalekjhmjagjahbjflkjiljjbgfhmekifjdejijehfgfjajbbabcgdbhmjmjabfackghfjflcejdcbdfdamcagjbgicbilhdmfclmaemdgkfdgegicikmifbkcckfkkblldhidlmfgckiiceghfcedjbaggmfkkfiacaffkfmliligeadeghklcbhdkgdcgkijklhkbgjicmfiffaaebimmeicaajfikmfbfkemmadgdaiiicjfcfeffmmhhejfgilkalglmfbgckgcdmcbhimfkmhmcccibjcalhfbgmhkckjfmdaamaffheimfihmaifalbamkfeibghkghfbmkghdimmjcmbdbafdfakaideemalgijieifiaakdfbcjggmelclmijhjgjigfhcabgmimcmkbdidhdagbbjeablcdleleijagkaijlgfgiehimklcaidcdeaekeddijlhaijlfclfcflblklgadbdabickelhdlkhefilhcecejkfacfbhcabcjjjhllhelljdmkjgihfebdhbiljijlhclmhgejaecihjfigbdmleebhcaehcgadidbfjjhkkcgddlieidgabhhcghaeehbhghhacgckmkhklchaeeieghjibkmebcifllamgflhikhfkhhmaeekecbcgfblbikgehhdjmedggfdghaafmeghiiiaahgilfibddilfbkdgbjiecibbdekhjbkdhigigffcgmbikhdmbgelgkfidfjkddhfifkdgmihkbdlhlmlkhkbjlhdhgaafkcebcjjaagmkecechalmbheieibihefcllgliamigjgbjcjkgdjeimffhehcjciabgjhgkgmcmemfchiemfldfjimmbeiiiaedkhlkeeijecedclbkhkkekjecfjlilidfigammdgjkgahibdbbkbgjgbabebjcglgfaldgiglilhgfbicchideehgffhfcheamklkkdgfmakhdgmdclejcfgfdlmmbgjamlgchaabelcllalccckajmmkfghaefbebaibdkeegicgmfdgbilhllkfhcgfdeddkfciiibgjhikhaagdkkdmjllalfifjcijhljfebiaflhjdkhmaeejgjkkaelgglefccejidmgkddekjjffcbfjmbmkihmemaibadaihhchdfgiejglmkclcfjgajlgbeillgfbhkgldmfekjbdegjmiddaeaebiaedkdbmciceggbalffddijfccadhhkfgebakkfcmdegdkdbglaeblabjahcjillgmihifbgmiejbefjjecgfkjibejeemcibmcmiifmaiggljgikhiebgijfjafchcjbdmiffjigkmcfhejjagmddjmeckcdhbbdgdcmgfhlcaggjlijjhghihlammgkdekgbkfellfdkcfkigjjecffmgeikafadbfdaadiembbmiadbkbljmkfedllghlhemeaimbamlfcehegbgccfbcjblahdlaakeafmlkjljlkiaglmeideifgdbadjehhmmkfhdkldebegbbiiblkmidlmeejlaemkhfajmidlfcjgiejmmihllbigelbekkfagdcjdbmifdmmchcllmihjlmhblkfcbcjiiaejhgldjmieejhjiadfkfmgamcdlcljbfclkaflhjbeajdkdkjecifikmleblijjedcaccikggjcgidmfjegkbhcacalmbcdgbfjkjajclgdbfcdkemajlajeklieibjhcdheglagfeeagjbacmjdhadgelhemeefikmejlkdcghahfdkhaacghieffcgfgllmdgbkhejkjdcdddhdfdcdidejaekjeclccmedjjmaellmcgfiacbhdfmcdcielcalchbgagelhjjmmkljfagkfjijmddafglimkekjagmhgfiidjefjfmaihhbhhhaafhiekmdkgidjmljfgmgcijbbjmbjiikailalbffjhedbfbbhcbbbicblagibbdamalkiblhblhacdckllbliccmjgedkjbeihhglhbcfaefaimlbjfhmjadlmgdikjjkkghidlfblkdgdbagkldghadhmmckfhkddedlgdfdifghagkdjiklmfbdajfemjcjlamfflgiekmabhcigclbdfefkfmdaffeccgcdflacahhademhjlchabeabbfjfeefhmmbaajmmlmgfhbclkfaihkehjljjhdbkkieikajbbgmfiilkehcliacgggmidlkgjmcjkhjklddijjmjdkejajgllcechmmbfbibdddfgakfmgebkfcbbkjehemckcaefimgfiamhddahklgdhcdgicdmmdfgemlhdcaglcdkeehjkccgcllcldbkggjihdafcfkhkifmkadgkmbgkbgkmilldfhjebdjdfkhmfdhldjmkbcebbbaiemgkihggeebkaibkhajkamfhcbcckgkjbfamlbghhdcehigmehmafalbjedgdgddgjfkfmmeicjlcaajemkjiligbfcbliagicggjclclgidkibkddfgfkclfgdblfebfkcjelghejlejckbgiibedgaebaffcleemmcdgfgjlhdagdmgagiambakabajcjmlifiikckjjfbmafiahmlbhcfegdaekjcgjdbhefkcfdcgkkmlibchbfjbalkbkmgjfbgjlbiffeeabbmgjgbillamjeefklbbibkddcifdakjdlekbkcemkmgdhabdeiccijlicgaecbefmcjeemccegaldfaeafdedbakmiaakjlcbddkkidmkdkdifdgaeflhbkbadgebhhhlaeeajfheamkfkakgmamhaialdmbllbddfidaibffmihfehddlhbemlgdkkikfhkigfkbfjijfiahkfhihkgmblfgidflleameaicgkmimdejkkddddfagfjceffmmkmcffkdfmfjbgjdkbgbelkgjcfhiijlijfeiimcblamiecbmaifejeklfeggfkeiamalhjgklhaellimjelhbgjcghjbfkdjhlmhgkafkkdkkfldbafljgchilbleabgiejfgjhhgcejjj
bhkmblkiljbeafhlbdecimdejflhkbkccbkmljldjaihddjmajefjkkdmjkhghdhkhbhmkhjkldlfjjdhdklkheajceelahchhicmkjhekdejdefabaceemjbhimlfjihdmcbhlgihkhgdaibgfbfebadiadkmbjmhgifhefejjgkihkfcbdkjcecjmcifjidfegblklbbabjcfbighkaemgklbidlckebdlgmklifibghalbglmaihkggjcjljgibahghealfhhfiglljdhbffleccdjechchicddkfgimahhmgbjhdlheadfmahelbkhkkgmchljaaekcjhclhghdkebfkcadfajbihemfmjibaidhabdmblakajkddbajemkhebkdkafchalahkijkblmmfakkmdeikhbfhmekakhkmfgjkgljggacmamklbmkdkldmgggajmkaaeimjbffigdjffemcjdfklgbmclkjfhljhfldjkbdfihcjhiaeccafjajldibdlmbkigidecbecbgmlbfcljhieejegclgdeclcfblglgkbmfkhecjgkkkkleeledlmigcijbblhbkeeeifggbkihglgekbjedficgafflgdmhbgajjdajcjalggbciefmbimgabjcbehacagejjbcldalbgfgmiflicdcbabhkmddemieaheldmihcagiledmafagiajgffflfihfghhkkdhlijdgiimbdefehhdkeakddmhedcamjbimigmfajjemlgfdaalelecbifmkjccaefemaijddlmbkmlldhfbklljdedhahajhjmcmaglmbhjagjiifhkdbiehggajddkjchkbeddkahljjgefeffcbdlhkemmecdmbimdmamljhcicfiaambjehjmkjhfajadkeacgcadmcmfkbghbljbfiadkmaacabflejigcialheaibehjblkieaalbclbmhlfekgmggdakhicfaicceggahmidhemaibaiaabfhdjjifbdbkceicgdikhljdhimamghcgjljacdikilhcahedamkgfafhffmlifdeclkekmchmlbigjhijlmfejjjhcdfmjaggfllkdijhadlgfhiiikefglibjclhgedfdmeifeegeelmliefjfjldkdihciclagljcgajdmeijljfdhjkkajfckgaddeaakmjhhahkijhjhfjijamdeakeabfhfifdfkcejjfdgcjjlehkbmmbabiblgjkdhglgjgecfhicildemlaakikfbcdflejfgclmlclbldgldddclhjgdelfjdegbhglmhakdagmgkecdkeihdijijlkckjbammeiafkhmfjieflkcbhiggdjdeaiccaaaaildkmcffkhajefjakgjcglibjcejabfhlddimighmlcggbebbdlhbbjhikagificilmlcbidehkdfeimialijcbfmlgejldbleljgclfhiamhhgcgfjgcjgkmahkchbagfkkakcklefiimhekhckagcmcjadblhljjljdklcgidggmebmfifbfjcgcbhcgehkdikefecmhajjheaecjdiblhhcfcgfgdkjcfgjmhegahfeamclcmjemidkmkjfaecekchmkigdejeeiihlekgiggkcgmblaiblalacddicmehmjhlhmkfleaamamgbdaghdilgcjmfaklbcbldcmikakbmailkkjjlgjiaddfcbcfciladbeedhglebmefjgjfdhebjikbeldkmjldaekgjglbkiagkmlagblideedeehembjdliladifemkgchmlchlbjiaglmbikleclgeefhjlimalibckjgfjfgffhikllghbldhelgjmiifilgkkbdclkggijikbkieldgmggbjcgcfbjaedgclfahajlahllflihbkmakehbgdjbchdajigbdgiefaaadjkkjbjbekdfhaidjfgjgjablkggbagbbhmlkikdhblmfifldbmefjbljgkmdgbbcellefjgmbeladfjbibbjedccaebjakkadcmclihbgcfmjbdldmfcjifcaadibkfkdighjfhgjjaeifdebdkbjhbkibjimmmembkliildfbchbfablcmmjeigemdlkgbgbcfgibekbihkhklhkhkdacjlibkkdlbebbbdkkfdmlbijhammeeeejlfbheicdbcbgeeccfbabjlhadbhbhkmfgfichadjjiakjgagjadkkbggcjkbdciddjmflgedcihmgalkbehccmcagmmifcckcadgclbehhddbcaaiglachgdmhlammfhifahggigbkjblhlbedjldcjkfkglfjkjidciemkjkhkflfldkbhkjgcigdfdlblfkigalkijgmdmiabdiakbcfdldcmkkffihmemakiakfggadcjccckflemckgldjhiblgkhakfccbabfbjidhfmlbkjbedkfmhjjijijfbemffccmccckmhhaadcamfhmikmabkcmklbcikhkhfmdghhihllmekhefbdhgbdhldakljemeggdgabieebcklgkjmcgddhgfmkdbcafgkmhdjfkgdcfalkaadllcmglbkefkllhjghhdfdejbmfkcagaicfmigbdgaldjebejbhmggbkacickeidiimecglbdeeaceedgabballkmjjbjlkjgcjhiibbiflkggcgdemhimegghdjmlcbmhgmhblegehmecflcmmljakfidkmlbhjjdkhmccadkckalkgdiijmbgmceiejkmkabdbmikmlgabheidhbmdkdalhgfigafmccdhkggmbjabkdflckkflacecklaccmlailedldkkbddcjhbhldkimedlhblckbagdbcekmgicjaeemmjiljbiglfggfmgjmabcialkffdamjgfbgmjdfjgafjehdfcgideedgigalffjgcgdkbkfiijiaiglggdbmbflickgamjgghdllfjmhajmgleebdghejihmimlclfidcalfijmlbmejhijfgfjjhechfachlfekgacfmimhbalgcecaijajamchbfaghlljmaihfdajflhmhbgkmjdckdldfgmmcjijebafblikkklbheejfgfhfhmejgfmcakjdfdleejlmaahafgfikhjmlbjbbekbjlkkjflkagmhkfgabcildgfbdckelakmbckeigdddicbkacbfgdejjmegkcflhcajjmhlhkbccfgebhamhgfaggcdjgejcdfcjkcdmbijabjgfbfkgdbagmdflfhfjgaeimajljaamadglkmahjmfbbjhhkmdclcichackjdhmdmegfjdhghmhmkefhklgbjcdbmlblmjmkhcdbdmhhfkhicdlmidbgfcdiakgdmmlldfkafjeaegiifcbkgcbaghbcbcfdmkkalcibdahekgdhkflimafkdekmmdahmhedmakdah
jidabhggegfcihkjieeffhefbfjfhemjfbmjfkjidgddimajdimjlljfjahiehafeijhmhilkekdcdiekimaicdfalkgemdjdijfdldajmhgdcmgkcdmmbaiceabkdmejfgdfdcgihibmahmkhmelihggeklgamcecifigekhimdbgkhddlhaeimmgleiikjcjkijfkblgemmefecdahbeckgjjfklmlekkgjlccjfgblkkibljfegbdifcjgdmecglilcmibbdcbficdbheclcejcbagfhgmihamehmligjbmaccimbmejdcabmacfabkkfkacffhhbdechlbgeifjmbkbhdikhahkebafjjkjcejcaciagahjghhjhkeefhjjcfmmahfdkhchhklegjlbbbcdlfcclflgfiibljmbbjhkdjdleegekccaejbhejikkchmmfjejjljiggieabmefajhkgkledgkkejibmbahhehmfdakcfbhemdmemjbgjfgbfgdlflbhkmfackkceeigejdaggfidmfcdaccmmhlmifdddgagmfmejhfbaicccdeijbhefabejkghlmckfdbkjddgdakldccfdgjdghcdhdhjdlkgccehhlbjbkkmeceihgcmiklblkabfmmilicjilgehfhbdihmikgckieggbbbbmmcakkadfbbcffeaijfjmalmlfbdbjdckkfmbefihjiefhfgldmgahmlbgkcdeachjfjccjlcicfleblfdekilcfkgjefflhjckakgkfkdeikhjflddgebmhiiidcdhifhefcdableckklcmiekdgmlcdhjfljlcdbcafekbecaeemgjfcdjhfgeimddmaafihgffmfjmledefikjhefakdiabbkfjkfahhljklagjfbjhjbbcgejbaalhcjdcgfdcbkkjaemmmfgmbdadfmdiaifdmfgfmecdcbkcmbfcgmachffflaicadkjkdekbcidbkcbfdikfdmjlailmgalabejgldcdmfalhakmlgfblikgcaicdmkaiacehchjhkfjflkmfkclibdcljhhgmiecekecdbcemfahfheejmmiljemkdfflfiaijlkilhaeejackljkccllahkfhebmcbimmmbiabaalmdhiebefchkbabgkfmiabdfiaglgbaemmggdebjgbdchakdgekgekflmkllabadegfmegjhkgflelilhghalmmhimelmfcjgiabkbckkkeedbldbdhhmiclfjekmhhhfcfglclgglmifjihfgfgjgalhhbgbahbdfbdmjdlglicjhahljkejkcafdlikahemllljhgkeeiblkhfkjalgflcdlidkdceiefgjlifllchkhdmekimflfakiahbliflilkcmiihhckilkgkhlekfaikkjklbjjfabdfjeiikkibflgaediekjdiaiabileafkehimhbhbmmhcbdgfhiigbdebimecfhllaggdhlmfhijiekaaaffhmimejjcahhckhjmiamgbblkbjdhmmcccidcifmkkjhejicfmegclemfidelicjambgmkjeabffahiemehkglhmfilcbfiglfhfdemebkbmmeeimkadekmelffemllaachaemkikkemehfjkhmdfdkakdgbimedmmckidamlgdfeibkgickhldagfhflmecdmcglifedaeabfckjlkigecfhejlaicfifbffjmejhfbikflickdjadjjfdcglbhljbabefcammkicdlfbiklbjbkjhdcdbfafjleibdhjdcabjlfcddikhjbbchdffjdmdbkmgdafcbjchihjgiiijcgjmjkaahbdhljhfcmljhcaakickjdjifljmhebgkdhlhaadjimhemgbbegcjbgiafbmleklgahdamiegbfkekjkgkejbmlflkkdgkieecgkjhafblgkhhbkdbbfgkggccbgdchflkkcbakhcdkdbiailcighigcdedjekhmhihblgiiciffikaahghababklkegihiflmdahhgjmgbdjgclmjdlgcgeghffmdcahkilbajkggdbdijccmjbbdkhjmefeehfcadgeemghibiiimabmimhhdfffdejjibekdlkjghkhhhaaeemheedhkigcljkfjjmikaaaegjdkiefibcabelijmkgkkchjkaadfhjhackmbjelieefmljfbhkimkifigicmcfiidfcebmeadcagdikcmjcgkcfihdgmkeeigibjidghjmcaeccihdhljcmbdbellbdhfakhmdkjgbcgdkcaefdfkmamfjgkhkdemlmijjfichfkdhejchmmbggedmhifklkckaiciicibcemfhbjbcleljbcdelmbkheafbmddbgdamafgkachfedgahkllkekifldahlmeljkgekljeecmbbidkfhkfkdbkjbljbgbbabmfcbagbebdjiccjgciefkghmclijjhgcjeailbbbbcmjgjcgglggeckdmdmdhhjlgdkijbdefadcklcbjkghahlhafelbbhaeehecbckcdmfkiiadkkcaghbafejclbmbjhddhfibafligideflgdjfleehllfdbacibdbhejbcjldiemhccimgidkmfmgmdihgeelbalfmgghkaecfeijfblghabbkejbmackmkjffbdimccakldblefljbbddbaedjbibhafdjmlflfbgefjcghlgmalbjjbgbgdmbhghajblalbaacdiibhcblijgjcbjbfmedmiahlibbbdidlcelelklflemiemklfdckillga"
6
5
26
sol = Solution()
s = "fehjblcdljlmckggcigkedfjcejklicihegfhkfbgegjiikcjgfacicaiheibcicmbilbkhhejfdifdehbjelcalcjellkaimhelkjhafcmjhikbgihjlmjclibceecelkaccklbdaifgdflidhidagiahlbjcfbijgeldjgedldbdchkblbdmcdjbjhccikelcmjjbfkhlfekdhbcakgbclgeijbdhmcmemebkgjeeeickifjglmjfjcmjidjgjmijceiikhmmaagebhifhkfhemfeigdlijffcjgmdehjgllkaallheikhghceekhcckfegghdcalalhkhlgikaamladheakecccgafkimibhiafkkkdbflklbhdagdefdgjfihbiakmjbdlhmlhalekjhmjagjahbjflkjiljjbgfhmekifjdejijehfgfjajbbabcgdbhmjmjabfackghfjflcejdcbdfdamcagjbgicbilhdmfclmaemdgkfdgegicikmifbkcckfkkblldhidlmfgckiiceghfcedjbaggmfkkfiacaffkfmliligeadeghklcbhdkgdcgkijklhkbgjicmfiffaaebimmeicaajfikmfbfkemmadgdaiiicjfcfeffmmhhejfgilkalglmfbgckgcdmcbhimfkmhmcccibjcalhfbgmhkckjfmdaamaffheimfihmaifalbamkfeibghkghfbmkghdimmjcmbdbafdfakaideemalgijieifiaakdfbcjggmelclmijhjgjigfhcabgmimcmkbdidhdagbbjeablcdleleijagkaijlgfgiehimklcaidcdeaekeddijlhaijlfclfcflblklgadbdabickelhdlkhefilhcecejkfacfbhcabcjjjhllhelljdmkjgihfebdhbiljijlhclmhgejaecihjfigbdmleebhcaehcgadidbfjjhkkcgddlieidgabhhcghaeehbhghhacgckmkhklchaeeieghjibkmebcifllamgflhikhfkhhmaeekecbcgfblbikgehhdjmedggfdghaafmeghiiiaahgilfibddilfbkdgbjiecibbdekhjbkdhigigffcgmbikhdmbgelgkfidfjkddhfifkdgmihkbdlhlmlkhkbjlhdhgaafkcebcjjaagmkecechalmbheieibihefcllgliamigjgbjcjkgdjeimffhehcjciabgjhgkgmcmemfchiemfldfjimmbeiiiaedkhlkeeijecedclbkhkkekjecfjlilidfigammdgjkgahibdbbkbgjgbabebjcglgfaldgiglilhgfbicchideehgffhfcheamklkkdgfmakhdgmdclejcfgfdlmmbgjamlgchaabelcllalccckajmmkfghaefbebaibdkeegicgmfdgbilhllkfhcgfdeddkfciiibgjhikhaagdkkdmjllalfifjcijhljfebiaflhjdkhmaeejgjkkaelgglefccejidmgkddekjjffcbfjmbmkihmemaibadaihhchdfgiejglmkclcfjgajlgbeillgfbhkgldmfekjbdegjmiddaeaebiaedkdbmciceggbalffddijfccadhhkfgebakkfcmdegdkdbglaeblabjahcjillgmihifbgmiejbefjjecgfkjibejeemcibmcmiifmaiggljgikhiebgijfjafchcjbdmiffjigkmcfhejjagmddjmeckcdhbbdgdcmgfhlcaggjlijjhghihlammgkdekgbkfellfdkcfkigjjecffmgeikafadbfdaadiembbmiadbkbljmkfedllghlhemeaimbamlfcehegbgccfbcjblahdlaakeafmlkjljlkiaglmeideifgdbadjehhmmkfhdkldebegbbiiblkmidlmeejlaemkhfajmidlfcjgiejmmihllbigelbekkfagdcjdbmifdmmchcllmihjlmhblkfcbcjiiaejhgldjmieejhjiadfkfmgamcdlcljbfclkaflhjbeajdkdkjecifikmleblijjedcaccikggjcgidmfjegkbhcacalmbcdgbfjkjajclgdbfcdkemajlajeklieibjhcdheglagfeeagjbacmjdhadgelhemeefikmejlkdcghahfdkhaacghieffcgfgllmdgbkhejkjdcdddhdfdcdidejaekjeclccmedjjmaellmcgfiacbhdfmcdcielcalchbgagelhjjmmkljfagkfjijmddafglimkekjagmhgfiidjefjfmaihhbhhhaafhiekmdkgidjmljfgmgcijbbjmbjiikailalbffjhedbfbbhcbbbicblagibbdamalkiblhblhacdckllbliccmjgedkjbeihhglhbcfaefaimlbjfhmjadlmgdikjjkkghidlfblkdgdbagkldghadhmmckfhkddedlgdfdifghagkdjiklmfbdajfemjcjlamfflgiekmabhcigclbdfefkfmdaffeccgcdflacahhademhjlchabeabbfjfeefhmmbaajmmlmgfhbclkfaihkehjljjhdbkkieikajbbgmfiilkehcliacgggmidlkgjmcjkhjklddijjmjdkejajgllcechmmbfbibdddfgakfmgebkfcbbkjehemckcaefimgfiamhddahklgdhcdgicdmmdfgemlhdcaglcdkeehjkccgcllcldbkggjihdafcfkhkifmkadgkmbgkbgkmilldfhjebdjdfkhmfdhldjmkbcebbbaiemgkihggeebkaibkhajkamfhcbcckgkjbfamlbghhdcehigmehmafalbjedgdgddgjfkfmmeicjlcaajemkjiligbfcbliagicggjclclgidkibkddfgfkclfgdblfebfkcjelghejlejckbgiibedgaebaffcleemmcdgfgjlhdagdmgagiambakabajcjmlifiikckjjfbmafiahmlbhcfegdaekjcgjdbhefkcfdcgkkmlibchbfjbalkbkmgjfbgjlbiffeeabbmgjgbillamjeefklbbibkddcifdakjdlekbkcemkmgdhabdeiccijlicgaecbefmcjeemccegaldfaeafdedbakmiaakjlcbddkkidmkdkdifdgaeflhbkbadgebhhhlaeeajfheamkfkakgmamhaialdmbllbddfidaibffmihfehddlhbemlgdkkikfhkigfkbfjijfiahkfhihkgmblfgidflleameaicgkmimdejkkddddfagfjceffmmkmcffkdfmfjbgjdkbgbelkgjcfhiijlijfeiimcblamiecbmaifejeklfeggfkeiamalhjgklhaellimjelhbgjcghjbfkdjhlmhgkafkkdkkfldbafljgchilbleabgiejfgjhhgc
ejjjbhkmblkiljbeafhlbdecimdejflhkbkccbkmljldjaihddjmajefjkkdmjkhghdhkhbhmkhjkldlfjjdhdklkheajceelahchhicmkjhekdejdefabaceemjbhimlfjihdmcbhlgihkhgdaibgfbfebadiadkmbjmhgifhefejjgkihkfcbdkjcecjmcifjidfegblklbbabjcfbighkaemgklbidlckebdlgmklifibghalbglmaihkggjcjljgibahghealfhhfiglljdhbffleccdjechchicddkfgimahhmgbjhdlheadfmahelbkhkkgmchljaaekcjhclhghdkebfkcadfajbihemfmjibaidhabdmblakajkddbajemkhebkdkafchalahkijkblmmfakkmdeikhbfhmekakhkmfgjkgljggacmamklbmkdkldmgggajmkaaeimjbffigdjffemcjdfklgbmclkjfhljhfldjkbdfihcjhiaeccafjajldibdlmbkigidecbecbgmlbfcljhieejegclgdeclcfblglgkbmfkhecjgkkkkleeledlmigcijbblhbkeeeifggbkihglgekbjedficgafflgdmhbgajjdajcjalggbciefmbimgabjcbehacagejjbcldalbgfgmiflicdcbabhkmddemieaheldmihcagiledmafagiajgffflfihfghhkkdhlijdgiimbdefehhdkeakddmhedcamjbimigmfajjemlgfdaalelecbifmkjccaefemaijddlmbkmlldhfbklljdedhahajhjmcmaglmbhjagjiifhkdbiehggajddkjchkbeddkahljjgefeffcbdlhkemmecdmbimdmamljhcicfiaambjehjmkjhfajadkeacgcadmcmfkbghbljbfiadkmaacabflejigcialheaibehjblkieaalbclbmhlfekgmggdakhicfaicceggahmidhemaibaiaabfhdjjifbdbkceicgdikhljdhimamghcgjljacdikilhcahedamkgfafhffmlifdeclkekmchmlbigjhijlmfejjjhcdfmjaggfllkdijhadlgfhiiikefglibjclhgedfdmeifeegeelmliefjfjldkdihciclagljcgajdmeijljfdhjkkajfckgaddeaakmjhhahkijhjhfjijamdeakeabfhfifdfkcejjfdgcjjlehkbmmbabiblgjkdhglgjgecfhicildemlaakikfbcdflejfgclmlclbldgldddclhjgdelfjdegbhglmhakdagmgkecdkeihdijijlkckjbammeiafkhmfjieflkcbhiggdjdeaiccaaaaildkmcffkhajefjakgjcglibjcejabfhlddimighmlcggbebbdlhbbjhikagificilmlcbidehkdfeimialijcbfmlgejldbleljgclfhiamhhgcgfjgcjgkmahkchbagfkkakcklefiimhekhckagcmcjadblhljjljdklcgidggmebmfifbfjcgcbhcgehkdikefecmhajjheaecjdiblhhcfcgfgdkjcfgjmhegahfeamclcmjemidkmkjfaecekchmkigdejeeiihlekgiggkcgmblaiblalacddicmehmjhlhmkfleaamamgbdaghdilgcjmfaklbcbldcmikakbmailkkjjlgjiaddfcbcfciladbeedhglebmefjgjfdhebjikbeldkmjldaekgjglbkiagkmlagblideedeehembjdliladifemkgchmlchlbjiaglmbikleclgeefhjlimalibckjgfjfgffhikllghbldhelgjmiifilgkkbdclkggijikbkieldgmggbjcgcfbjaedgclfahajlahllflihbkmakehbgdjbchdajigbdgiefaaadjkkjbjbekdfhaidjfgjgjablkggbagbbhmlkikdhblmfifldbmefjbljgkmdgbbcellefjgmbeladfjbibbjedccaebjakkadcmclihbgcfmjbdldmfcjifcaadibkfkdighjfhgjjaeifdebdkbjhbkibjimmmembkliildfbchbfablcmmjeigemdlkgbgbcfgibekbihkhklhkhkdacjlibkkdlbebbbdkkfdmlbijhammeeeejlfbheicdbcbgeeccfbabjlhadbhbhkmfgfichadjjiakjgagjadkkbggcjkbdciddjmflgedcihmgalkbehccmcagmmifcckcadgclbehhddbcaaiglachgdmhlammfhifahggigbkjblhlbedjldcjkfkglfjkjidciemkjkhkflfldkbhkjgcigdfdlblfkigalkijgmdmiabdiakbcfdldcmkkffihmemakiakfggadcjccckflemckgldjhiblgkhakfccbabfbjidhfmlbkjbedkfmhjjijijfbemffccmccckmhhaadcamfhmikmabkcmklbcikhkhfmdghhihllmekhefbdhgbdhldakljemeggdgabieebcklgkjmcgddhgfmkdbcafgkmhdjfkgdcfalkaadllcmglbkefkllhjghhdfdejbmfkcagaicfmigbdgaldjebejbhmggbkacickeidiimecglbdeeaceedgabballkmjjbjlkjgcjhiibbiflkggcgdemhimegghdjmlcbmhgmhblegehmecflcmmljakfidkmlbhjjdkhmccadkckalkgdiijmbgmceiejkmkabdbmikmlgabheidhbmdkdalhgfigafmccdhkggmbjabkdflckkflacecklaccmlailedldkkbddcjhbhldkimedlhblckbagdbcekmgicjaeemmjiljbiglfggfmgjmabcialkffdamjgfbgmjdfjgafjehdfcgideedgigalffjgcgdkbkfiijiaiglggdbmbflickgamjgghdllfjmhajmgleebdghejihmimlclfidcalfijmlbmejhijfgfjjhechfachlfekgacfmimhbalgcecaijajamchbfaghlljmaihfdajflhmhbgkmjdckdldfgmmcjijebafblikkklbheejfgfhfhmejgfmcakjdfdleejlmaahafgfikhjmlbjbbekbjlkkjflkagmhkfgabcildgfbdckelakmbckeigdddicbkacbfgdejjmegkcflhcajjmhlhkbccfgebhamhgfaggcdjgejcdfcjkcdmbijabjgfbfkgdbagmdflfhfjgaeimajljaamadglkmahjmfbbjhhkmdclcichackjdhmdmegfjdhghmhmkefhklgbjcdbmlblmjmkhcdbdmhhfkhicdlmidbgfcdiakgdmmlldfkafjeaegiifcbkgcbaghbcbcfdmkkalcibdahekgdhkflimafkdekmmdahmhedma
kdahjidabhggegfcihkjieeffhefbfjfhemjfbmjfkjidgddimajdimjlljfjahiehafeijhmhilkekdcdiekimaicdfalkgemdjdijfdldajmhgdcmgkcdmmbaiceabkdmejfgdfdcgihibmahmkhmelihggeklgamcecifigekhimdbgkhddlhaeimmgleiikjcjkijfkblgemmefecdahbeckgjjfklmlekkgjlccjfgblkkibljfegbdifcjgdmecglilcmibbdcbficdbheclcejcbagfhgmihamehmligjbmaccimbmejdcabmacfabkkfkacffhhbdechlbgeifjmbkbhdikhahkebafjjkjcejcaciagahjghhjhkeefhjjcfmmahfdkhchhklegjlbbbcdlfcclflgfiibljmbbjhkdjdleegekccaejbhejikkchmmfjejjljiggieabmefajhkgkledgkkejibmbahhehmfdakcfbhemdmemjbgjfgbfgdlflbhkmfackkceeigejdaggfidmfcdaccmmhlmifdddgagmfmejhfbaicccdeijbhefabejkghlmckfdbkjddgdakldccfdgjdghcdhdhjdlkgccehhlbjbkkmeceihgcmiklblkabfmmilicjilgehfhbdihmikgckieggbbbbmmcakkadfbbcffeaijfjmalmlfbdbjdckkfmbefihjiefhfgldmgahmlbgkcdeachjfjccjlcicfleblfdekilcfkgjefflhjckakgkfkdeikhjflddgebmhiiidcdhifhefcdableckklcmiekdgmlcdhjfljlcdbcafekbecaeemgjfcdjhfgeimddmaafihgffmfjmledefikjhefakdiabbkfjkfahhljklagjfbjhjbbcgejbaalhcjdcgfdcbkkjaemmmfgmbdadfmdiaifdmfgfmecdcbkcmbfcgmachffflaicadkjkdekbcidbkcbfdikfdmjlailmgalabejgldcdmfalhakmlgfblikgcaicdmkaiacehchjhkfjflkmfkclibdcljhhgmiecekecdbcemfahfheejmmiljemkdfflfiaijlkilhaeejackljkccllahkfhebmcbimmmbiabaalmdhiebefchkbabgkfmiabdfiaglgbaemmggdebjgbdchakdgekgekflmkllabadegfmegjhkgflelilhghalmmhimelmfcjgiabkbckkkeedbldbdhhmiclfjekmhhhfcfglclgglmifjihfgfgjgalhhbgbahbdfbdmjdlglicjhahljkejkcafdlikahemllljhgkeeiblkhfkjalgflcdlidkdceiefgjlifllchkhdmekimflfakiahbliflilkcmiihhckilkgkhlekfaikkjklbjjfabdfjeiikkibflgaediekjdiaiabileafkehimhbhbmmhcbdgfhiigbdebimecfhllaggdhlmfhijiekaaaffhmimejjcahhckhjmiamgbblkbjdhmmcccidcifmkkjhejicfmegclemfidelicjambgmkjeabffahiemehkglhmfilcbfiglfhfdemebkbmmeeimkadekmelffemllaachaemkikkemehfjkhmdfdkakdgbimedmmckidamlgdfeibkgickhldagfhflmecdmcglifedaeabfckjlkigecfhejlaicfifbffjmejhfbikflickdjadjjfdcglbhljbabefcammkicdlfbiklbjbkjhdcdbfafjleibdhjdcabjlfcddikhjbbchdffjdmdbkmgdafcbjchihjgiiijcgjmjkaahbdhljhfcmljhcaakickjdjifljmhebgkdhlhaadjimhemgbbegcjbgiafbmleklgahdamiegbfkekjkgkejbmlflkkdgkieecgkjhafblgkhhbkdbbfgkggccbgdchflkkcbakhcdkdbiailcighigcdedjekhmhihblgiiciffikaahghababklkegihiflmdahhgjmgbdjgclmjdlgcgeghffmdcahkilbajkggdbdijccmjbbdkhjmefeehfcadgeemghibiiimabmimhhdfffdejjibekdlkjghkhhhaaeemheedhkigcljkfjjmikaaaegjdkiefibcabelijmkgkkchjkaadfhjhackmbjelieefmljfbhkimkifigicmcfiidfcebmeadcagdikcmjcgkcfihdgmkeeigibjidghjmcaeccihdhljcmbdbellbdhfakhmdkjgbcgdkcaefdfkmamfjgkhkdemlmijjfichfkdhejchmmbggedmhifklkckaiciicibcemfhbjbcleljbcdelmbkheafbmddbgdamafgkachfedgahkllkekifldahlmeljkgekljeecmbbidkfhkfkdbkjbljbgbbabmfcbagbebdjiccjgciefkghmclijjhgcjeailbbbbcmjgjcgglggeckdmdmdhhjlgdkijbdefadcklcbjkghahlhafelbbhaeehecbckcdmfkiiadkkcaghbafejclbmbjhddhfibafligideflgdjfleehllfdbacibdbhejbcjldiemhccimgidkmfmgmdihgeelbalfmgghkaecfeijfblghabbkejbmackmkjffbdimccakldblefljbbddbaedjbibhafdjmlflfbgefjcghlgmalbjjbgbgdmbhghajblalbaacdiibhcblijgjcbjbfmedmiahlibbbdidlcelelklflemiemklfdckillga"
maxLetters = 6
minSize = 5
maxSize = 26
print(len(s))
print(sol.maxFreq(s, maxLetters, minSize, maxSize))
| 363.229508
| 10,006
| 0.971837
| 136
| 22,157
| 158.279412
| 0.367647
| 0.002091
| 0.001812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001247
| 0.022927
| 22,157
| 61
| 10,007
| 363.229508
| 0.993071
| 0.000812
| 0
| 0.045455
| 0
| 0
| 0.948852
| 0.948852
| 0
| 1
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0
| 0
| 0.181818
| 0.045455
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cf50f93bd8ae6d0b9850d85104a513ea858110bf
| 33
|
py
|
Python
|
test.py
|
TheCulliganMan/ether_watch
|
ce8c56976bad77cab2a4b3511fe3ceea6085b4cf
|
[
"MIT"
] | null | null | null |
test.py
|
TheCulliganMan/ether_watch
|
ce8c56976bad77cab2a4b3511fe3ceea6085b4cf
|
[
"MIT"
] | null | null | null |
test.py
|
TheCulliganMan/ether_watch
|
ce8c56976bad77cab2a4b3511fe3ceea6085b4cf
|
[
"MIT"
] | null | null | null |
from .etherwatch import main  # assumes the sibling etherwatch module defines main()
print(main())
| 11
| 18
| 0.757576
| 4
| 33
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 33
| 2
| 19
| 16.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
cf50ff81a62ce68b1342250fbc363f80d83ab94d
| 81
|
py
|
Python
|
sortingview/experimental/__init__.py
|
magland/sortingview
|
0b1be9d55048cd4b8a0b6b6733bd7d35cb440aa7
|
[
"Apache-2.0"
] | 2
|
2021-11-19T04:51:42.000Z
|
2022-03-12T23:36:19.000Z
|
sortingview/experimental/__init__.py
|
magland/sortingview
|
0b1be9d55048cd4b8a0b6b6733bd7d35cb440aa7
|
[
"Apache-2.0"
] | 172
|
2021-05-10T17:39:15.000Z
|
2022-03-18T21:46:15.000Z
|
sortingview/experimental/__init__.py
|
magland/sortingview
|
0b1be9d55048cd4b8a0b6b6733bd7d35cb440aa7
|
[
"Apache-2.0"
] | 2
|
2021-08-29T20:13:57.000Z
|
2022-03-12T23:36:34.000Z
|
from .load_recording import load_recording
from .load_sorting import load_sorting
| 40.5
| 42
| 0.888889
| 12
| 81
| 5.666667
| 0.416667
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08642
| 81
| 2
| 43
| 40.5
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cf7dc9a82d51c0f19eb3af7ed066ed85be0b6744
| 334
|
py
|
Python
|
models/__init__.py
|
huangleiBuaa/XBNBlock
|
c33650aa470fc5fd6da9b9f5491002b1bd70be2f
|
[
"BSD-2-Clause"
] | 13
|
2022-03-23T17:52:53.000Z
|
2022-03-30T12:57:34.000Z
|
models/__init__.py
|
huangleiBuaa/XBNBlock
|
c33650aa470fc5fd6da9b9f5491002b1bd70be2f
|
[
"BSD-2-Clause"
] | null | null | null |
models/__init__.py
|
huangleiBuaa/XBNBlock
|
c33650aa470fc5fd6da9b9f5491002b1bd70be2f
|
[
"BSD-2-Clause"
] | 1
|
2022-03-25T02:16:00.000Z
|
2022-03-25T02:16:00.000Z
|
from .resnet import *
from .resnet_XBNBlock_P1 import *
from .resnet_XBNBlock_P2 import *
from .resnet_XBNBlock_P3 import *
from .resnext_XBNBlock_P2 import *
from .resnext import *
from .resnext_forGN import *
from .mobilenetV2 import *
from .mobilenetV2_XBNBlock import *
from .shuffleV2 import *
from .shuffleV2_XBNBlock import *
| 25.692308
| 35
| 0.799401
| 44
| 334
| 5.818182
| 0.25
| 0.390625
| 0.1875
| 0.28125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027682
| 0.134731
| 334
| 12
| 36
| 27.833333
| 0.858131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d872011661142c20ba20eced2cb7c4c0d63041ef
| 235
|
py
|
Python
|
models/news_model.py
|
ismdeep/jxust-news-monitor
|
a5dc6ba689a21e85e9e44847dbbd4d6ed02a3d34
|
[
"MIT"
] | null | null | null |
models/news_model.py
|
ismdeep/jxust-news-monitor
|
a5dc6ba689a21e85e9e44847dbbd4d6ed02a3d34
|
[
"MIT"
] | null | null | null |
models/news_model.py
|
ismdeep/jxust-news-monitor
|
a5dc6ba689a21e85e9e44847dbbd4d6ed02a3d34
|
[
"MIT"
] | null | null | null |
class NewsModel:
title = None
url = None
def __init__(self, __title__, __url__):
self.title = __title__
self.url = __url__
def __str__(self):
return '''["%s", "%s"]''' % (self.title, self.url)
| 21.363636
| 58
| 0.557447
| 27
| 235
| 3.962963
| 0.407407
| 0.252336
| 0.224299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.289362
| 235
| 10
| 59
| 23.5
| 0.640719
| 0
| 0
| 0
| 0
| 0
| 0.051064
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.125
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
d89d9a6538f430ec2c12f02401156cb0d4fedbdf
| 61
|
py
|
Python
|
acq4/devices/Scanner/__init__.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 47
|
2015-01-05T16:18:10.000Z
|
2022-03-16T13:09:30.000Z
|
acq4/devices/Scanner/__init__.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 48
|
2015-04-19T16:51:41.000Z
|
2022-03-31T14:48:16.000Z
|
acq4/devices/Scanner/__init__.py
|
sensapex/acq4
|
9561ba73caff42c609bd02270527858433862ad8
|
[
"MIT"
] | 32
|
2015-01-15T14:11:49.000Z
|
2021-07-15T13:44:52.000Z
|
from __future__ import print_function
from .Scanner import *
| 20.333333
| 37
| 0.836066
| 8
| 61
| 5.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131148
| 61
| 2
| 38
| 30.5
| 0.867925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
d8b16fd2d43764b728355c8d455e183a202ed9d8
| 2,154
|
py
|
Python
|
tests/user_message_test.py
|
zixia/python-wechaty
|
1aa6cf12f0d050761c58b016a4f3a8341373e5d2
|
[
"Apache-2.0"
] | 1
|
2021-12-10T06:48:33.000Z
|
2021-12-10T06:48:33.000Z
|
tests/user_message_test.py
|
zixia/python-wechaty
|
1aa6cf12f0d050761c58b016a4f3a8341373e5d2
|
[
"Apache-2.0"
] | 1
|
2022-01-17T04:02:36.000Z
|
2022-01-17T04:02:36.000Z
|
tests/user_message_test.py
|
zixia/python-wechaty
|
1aa6cf12f0d050761c58b016a4f3a8341373e5d2
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from wechaty.wechaty import Wechaty
@pytest.mark.asyncio
async def test_mention_text_without_mentions(test_bot: Wechaty) -> None:
"""Test extracting mention text from a message without mentions"""
msg = await test_bot.Message.find(message_id="no_mention")
await msg.ready()
text = await msg.mention_text()
assert text == 'foo bar asd'
@pytest.mark.asyncio
async def test_mention_text_without_mentions_in_room(test_bot: Wechaty) -> None:
"""Test extracting mention text from a message without mentions"""
msg = await test_bot.Message.find(message_id="room_no_mention")
await msg.ready()
text = await msg.mention_text()
assert text == 'beep'
@pytest.mark.asyncio
async def test_mention_text_with_mentions_in_room(test_bot: Wechaty) -> None:
"""Test extracting mention text from a message without mentions"""
msg = await test_bot.Message.find(message_id="room_with_mentions")
await msg.ready()
text = await msg.mention_text()
assert text == 'test message asd'
@pytest.mark.asyncio
async def test_mention_text_with_mentions_and_alias_in_room(test_bot: Wechaty) -> None:
"""Test extracting mention text from a message without mentions"""
msg = await test_bot.Message.find(message_id="room_with_mentions_and_alias")
await msg.ready()
text = await msg.mention_text()
assert text == '123123 kkasd'
@pytest.mark.asyncio
async def test_mention_text_with_mentions_and_mismatched_alias(test_bot: Wechaty) -> None:
"""Test extracting mention text from a message without mentions"""
msg = await test_bot.Message.find(message_id="room_with_mentions_and_alias_mismatched")
await msg.ready()
text = await msg.mention_text()
assert text == '123123@Fake User beep'
@pytest.mark.asyncio
async def test_mention_text_with_mentions_but_not_mention_data(test_bot: Wechaty) -> None:
"""Test extracting mention text from a message without mentions"""
msg = await test_bot.Message.find(message_id="room_with_text_mentions")
await msg.ready()
text = await msg.mention_text()
assert text == '@Wechaty User @Test User @Fake Alias beep!!'
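# Each test follows the same pattern: fetch a canned message by id through the
# test_bot fixture, await ready(), and compare mention_text() with the expected
# plain-text rendering of the message.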
| 37.789474
| 91
| 0.748375
| 311
| 2,154
| 4.926045
| 0.135048
| 0.129243
| 0.06658
| 0.086162
| 0.898172
| 0.898172
| 0.898172
| 0.898172
| 0.898172
| 0.894256
| 0
| 0.006583
| 0.153668
| 2,154
| 56
| 92
| 38.464286
| 0.83379
| 0
| 0
| 0.473684
| 0
| 0
| 0.136519
| 0.051195
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d8b27b02a0810630d1da2294aef39c2b7af46094
| 113
|
py
|
Python
|
freenom_dns_updater/exception/add_error.py
|
anhdhbn/Freenom-dns-updater
|
ec928755d7e18efa00bcc9aed20ad0b3eb093239
|
[
"MIT"
] | 160
|
2016-02-27T15:20:24.000Z
|
2022-03-13T17:27:49.000Z
|
freenom_dns_updater/exception/add_error.py
|
anhdhbn/Freenom-dns-updater
|
ec928755d7e18efa00bcc9aed20ad0b3eb093239
|
[
"MIT"
] | 31
|
2016-02-12T21:25:35.000Z
|
2022-03-03T19:24:59.000Z
|
freenom_dns_updater/exception/add_error.py
|
anhdhbn/Freenom-dns-updater
|
ec928755d7e18efa00bcc9aed20ad0b3eb093239
|
[
"MIT"
] | 56
|
2016-03-05T14:39:21.000Z
|
2022-02-11T01:21:15.000Z
|
from .dns_record_base_exception import DnsRecordBaseException
class AddError(DnsRecordBaseException):
pass
| 18.833333
| 61
| 0.849558
| 11
| 113
| 8.454545
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115044
| 113
| 5
| 62
| 22.6
| 0.93
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
d8f732cb67734321bc333dc189fe2106d1f622ee
| 51,726
|
py
|
Python
|
nova/network/security_group/neutron_driver.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/network/security_group/neutron_driver.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/network/security_group/neutron_driver.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | 2
|
2017-07-20T17:31:34.000Z
|
2020-07-24T02:42:19.000Z
|
begin_unit
comment|'# Copyright 2013 Nicira, Inc.'
nl|'\n'
comment|'# All Rights Reserved'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'sys'
newline|'\n'
nl|'\n'
name|'from'
name|'neutronclient'
op|'.'
name|'common'
name|'import'
name|'exceptions'
name|'as'
name|'n_exc'
newline|'\n'
name|'from'
name|'neutronclient'
op|'.'
name|'neutron'
name|'import'
name|'v2_0'
name|'as'
name|'neutronv20'
newline|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
name|'from'
name|'oslo_utils'
name|'import'
name|'excutils'
newline|'\n'
name|'from'
name|'oslo_utils'
name|'import'
name|'uuidutils'
newline|'\n'
name|'import'
name|'six'
newline|'\n'
name|'from'
name|'webob'
name|'import'
name|'exc'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'compute'
name|'import'
name|'api'
name|'as'
name|'compute_api'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_'
op|','
name|'_LE'
op|','
name|'_LI'
op|','
name|'_LW'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'network'
op|'.'
name|'neutronv2'
name|'import'
name|'api'
name|'as'
name|'neutronapi'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'network'
op|'.'
name|'security_group'
name|'import'
name|'security_group_base'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'utils'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
comment|'# NOTE: Neutron client has a max URL length of 8192, so we have'
nl|'\n'
comment|'# to limit the number of IDs we include in any single search. Really'
nl|'\n'
comment|"# doesn't seem to be any point in making this a config value."
nl|'\n'
DECL|variable|MAX_SEARCH_IDS
name|'MAX_SEARCH_IDS'
op|'='
number|'150'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|SecurityGroupAPI
name|'class'
name|'SecurityGroupAPI'
op|'('
name|'security_group_base'
op|'.'
name|'SecurityGroupBase'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|variable|id_is_uuid
indent|' '
name|'id_is_uuid'
op|'='
name|'True'
newline|'\n'
nl|'\n'
DECL|member|create_security_group
name|'def'
name|'create_security_group'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'name'
op|','
name|'description'
op|')'
op|':'
newline|'\n'
indent|' '
name|'neutron'
op|'='
name|'neutronapi'
op|'.'
name|'get_client'
op|'('
name|'context'
op|')'
newline|'\n'
name|'body'
op|'='
name|'self'
op|'.'
name|'_make_neutron_security_group_dict'
op|'('
name|'name'
op|','
name|'description'
op|')'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'security_group'
op|'='
name|'neutron'
op|'.'
name|'create_security_group'
op|'('
nl|'\n'
name|'body'
op|')'
op|'.'
name|'get'
op|'('
string|"'security_group'"
op|')'
newline|'\n'
dedent|''
name|'except'
name|'n_exc'
op|'.'
name|'BadRequest'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'Invalid'
op|'('
name|'six'
op|'.'
name|'text_type'
op|'('
name|'e'
op|')'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'n_exc'
op|'.'
name|'NeutronClientException'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'exc_info'
op|'='
name|'sys'
op|'.'
name|'exc_info'
op|'('
op|')'
newline|'\n'
name|'LOG'
op|'.'
name|'exception'
op|'('
name|'_LE'
op|'('
string|'"Neutron Error creating security group %s"'
op|')'
op|','
nl|'\n'
name|'name'
op|')'
newline|'\n'
name|'if'
name|'e'
op|'.'
name|'status_code'
op|'=='
number|'401'
op|':'
newline|'\n'
comment|'# TODO(arosen) Cannot raise generic response from neutron here'
nl|'\n'
comment|'# as this error code could be related to bad input or over'
nl|'\n'
comment|'# quota'
nl|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
op|')'
newline|'\n'
dedent|''
name|'elif'
name|'e'
op|'.'
name|'status_code'
op|'=='
number|'409'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'raise_over_quota'
op|'('
name|'six'
op|'.'
name|'text_type'
op|'('
name|'e'
op|')'
op|')'
newline|'\n'
dedent|''
name|'six'
op|'.'
name|'reraise'
op|'('
op|'*'
name|'exc_info'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'self'
op|'.'
name|'_convert_to_nova_security_group_format'
op|'('
name|'security_group'
op|')'
newline|'\n'
nl|'\n'
DECL|member|update_security_group
dedent|''
name|'def'
name|'update_security_group'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'security_group'
op|','
nl|'\n'
name|'name'
op|','
name|'description'
op|')'
op|':'
newline|'\n'
indent|' '
name|'neutron'
op|'='
name|'neutronapi'
op|'.'
name|'get_client'
op|'('
name|'context'
op|')'
newline|'\n'
name|'body'
op|'='
name|'self'
op|'.'
name|'_make_neutron_security_group_dict'
op|'('
name|'name'
op|','
name|'description'
op|')'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'security_group'
op|'='
name|'neutron'
op|'.'
name|'update_security_group'
op|'('
nl|'\n'
name|'security_group'
op|'['
string|"'id'"
op|']'
op|','
name|'body'
op|')'
op|'.'
name|'get'
op|'('
string|"'security_group'"
op|')'
newline|'\n'
dedent|''
name|'except'
name|'n_exc'
op|'.'
name|'NeutronClientException'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'exc_info'
op|'='
name|'sys'
op|'.'
name|'exc_info'
op|'('
op|')'
newline|'\n'
name|'LOG'
op|'.'
name|'exception'
op|'('
name|'_LE'
op|'('
string|'"Neutron Error updating security group %s"'
op|')'
op|','
nl|'\n'
name|'name'
op|')'
newline|'\n'
name|'if'
name|'e'
op|'.'
name|'status_code'
op|'=='
number|'401'
op|':'
newline|'\n'
comment|'# TODO(arosen) Cannot raise generic response from neutron here'
nl|'\n'
comment|'# as this error code could be related to bad input or over'
nl|'\n'
comment|'# quota'
nl|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
op|')'
newline|'\n'
dedent|''
name|'six'
op|'.'
name|'reraise'
op|'('
op|'*'
name|'exc_info'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'self'
op|'.'
name|'_convert_to_nova_security_group_format'
op|'('
name|'security_group'
op|')'
newline|'\n'
nl|'\n'
DECL|member|validate_property
dedent|''
name|'def'
name|'validate_property'
op|'('
name|'self'
op|','
name|'value'
op|','
name|'property'
op|','
name|'allowed'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Validate given security group property.\n\n :param value: the value to validate, as a string or unicode\n :param property: the property, either \'name\' or \'description\'\n :param allowed: the range of characters allowed, but not used because\n Neutron is allowing any characters.\n """'
newline|'\n'
nl|'\n'
comment|'# NOTE: If using nova-network as the backend, min_length is 1. However'
nl|'\n'
comment|'# if using Neutron, Nova has allowed empty string as its history.'
nl|'\n'
comment|'# So this min_length should be 0 for passing the existing requests.'
nl|'\n'
name|'utils'
op|'.'
name|'check_string_length'
op|'('
name|'value'
op|','
name|'name'
op|'='
name|'property'
op|','
name|'min_length'
op|'='
number|'0'
op|','
nl|'\n'
name|'max_length'
op|'='
number|'255'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_convert_to_nova_security_group_format
dedent|''
name|'def'
name|'_convert_to_nova_security_group_format'
op|'('
name|'self'
op|','
name|'security_group'
op|')'
op|':'
newline|'\n'
indent|' '
name|'nova_group'
op|'='
op|'{'
op|'}'
newline|'\n'
name|'nova_group'
op|'['
string|"'id'"
op|']'
op|'='
name|'security_group'
op|'['
string|"'id'"
op|']'
newline|'\n'
name|'nova_group'
op|'['
string|"'description'"
op|']'
op|'='
name|'security_group'
op|'['
string|"'description'"
op|']'
newline|'\n'
name|'nova_group'
op|'['
string|"'name'"
op|']'
op|'='
name|'security_group'
op|'['
string|"'name'"
op|']'
newline|'\n'
name|'nova_group'
op|'['
string|"'project_id'"
op|']'
op|'='
name|'security_group'
op|'['
string|"'tenant_id'"
op|']'
newline|'\n'
name|'nova_group'
op|'['
string|"'rules'"
op|']'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'rule'
name|'in'
name|'security_group'
op|'.'
name|'get'
op|'('
string|"'security_group_rules'"
op|','
op|'['
op|']'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'rule'
op|'['
string|"'direction'"
op|']'
op|'=='
string|"'ingress'"
op|':'
newline|'\n'
indent|' '
name|'nova_group'
op|'['
string|"'rules'"
op|']'
op|'.'
name|'append'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'_convert_to_nova_security_group_rule_format'
op|'('
name|'rule'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'return'
name|'nova_group'
newline|'\n'
nl|'\n'
DECL|member|_convert_to_nova_security_group_rule_format
dedent|''
name|'def'
name|'_convert_to_nova_security_group_rule_format'
op|'('
name|'self'
op|','
name|'rule'
op|')'
op|':'
newline|'\n'
indent|' '
name|'nova_rule'
op|'='
op|'{'
op|'}'
newline|'\n'
name|'nova_rule'
op|'['
string|"'id'"
op|']'
op|'='
name|'rule'
op|'['
string|"'id'"
op|']'
newline|'\n'
name|'nova_rule'
op|'['
string|"'parent_group_id'"
op|']'
op|'='
name|'rule'
op|'['
string|"'security_group_id'"
op|']'
newline|'\n'
name|'nova_rule'
op|'['
string|"'protocol'"
op|']'
op|'='
name|'rule'
op|'['
string|"'protocol'"
op|']'
newline|'\n'
name|'if'
op|'('
name|'nova_rule'
op|'['
string|"'protocol'"
op|']'
name|'and'
name|'rule'
op|'.'
name|'get'
op|'('
string|"'port_range_min'"
op|')'
name|'is'
name|'None'
name|'and'
nl|'\n'
name|'rule'
op|'.'
name|'get'
op|'('
string|"'port_range_max'"
op|')'
name|'is'
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'rule'
op|'['
string|"'protocol'"
op|']'
op|'.'
name|'upper'
op|'('
op|')'
name|'in'
op|'['
string|"'TCP'"
op|','
string|"'UDP'"
op|']'
op|':'
newline|'\n'
indent|' '
name|'nova_rule'
op|'['
string|"'from_port'"
op|']'
op|'='
number|'1'
newline|'\n'
name|'nova_rule'
op|'['
string|"'to_port'"
op|']'
op|'='
number|'65535'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'nova_rule'
op|'['
string|"'from_port'"
op|']'
op|'='
op|'-'
number|'1'
newline|'\n'
name|'nova_rule'
op|'['
string|"'to_port'"
op|']'
op|'='
op|'-'
number|'1'
newline|'\n'
dedent|''
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'nova_rule'
op|'['
string|"'from_port'"
op|']'
op|'='
name|'rule'
op|'.'
name|'get'
op|'('
string|"'port_range_min'"
op|')'
newline|'\n'
name|'nova_rule'
op|'['
string|"'to_port'"
op|']'
op|'='
name|'rule'
op|'.'
name|'get'
op|'('
string|"'port_range_max'"
op|')'
newline|'\n'
dedent|''
name|'nova_rule'
op|'['
string|"'group_id'"
op|']'
op|'='
name|'rule'
op|'['
string|"'remote_group_id'"
op|']'
newline|'\n'
name|'nova_rule'
op|'['
string|"'cidr'"
op|']'
op|'='
name|'self'
op|'.'
name|'parse_cidr'
op|'('
name|'rule'
op|'.'
name|'get'
op|'('
string|"'remote_ip_prefix'"
op|')'
op|')'
newline|'\n'
name|'return'
name|'nova_rule'
newline|'\n'
nl|'\n'
DECL|member|get
dedent|''
    def get(self, context, name=None, id=None, map_exception=False):
        neutron = neutronapi.get_client(context)
        try:
            if not id and name:
                # NOTE(flwang): The project id should be honoured so as to
                # get the correct security group id when a user (with admin
                # role but a non-admin project) queries by name, and to avoid
                # getting duplicated records with the same name.
                id = neutronv20.find_resourceid_by_name_or_id(
                    neutron, 'security_group', name, context.project_id)
            group = neutron.show_security_group(id).get('security_group')
            return self._convert_to_nova_security_group_format(group)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                LOG.debug("Neutron security group %s not found", name)
                raise exception.SecurityGroupNotFound(six.text_type(e))
            else:
                LOG.error(_LE("Neutron Error: %s"), e)
                six.reraise(*exc_info)
        except TypeError as e:
            LOG.error(_LE("Neutron Error: %s"), e)
            msg = _("Invalid security group name: %(name)s.") % {"name": name}
            raise exception.SecurityGroupNotFound(six.text_type(msg))

    def list(self, context, names=None, ids=None, project=None,
             search_opts=None):
        """Returns a list of security groups owned by the tenant."""
        neutron = neutronapi.get_client(context)
        params = {}
        search_opts = search_opts if search_opts else {}
        if names:
            params['name'] = names
        if ids:
            params['id'] = ids

        # NOTE(jeffrey4l): list all the security groups when the following
        # conditions are met:
        # * names and ids don't exist.
        # * it is an admin context and all_tenants exists in search_opts.
        # * project is not specified.
        list_all_tenants = (context.is_admin
                            and 'all_tenants' in search_opts
                            and not any([names, ids]))
        # NOTE(jeffrey4l): Neutron doesn't have an `all-tenants` concept.
        # All security groups will be returned if the project/tenant
        # id is not passed.
        if project and not list_all_tenants:
            params['tenant_id'] = project
        try:
            security_groups = neutron.list_security_groups(**params).get(
                'security_groups')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error getting security groups"))
        converted_rules = []
        for security_group in security_groups:
            converted_rules.append(
                self._convert_to_nova_security_group_format(security_group))
        return converted_rules

    def validate_id(self, id):
        if not uuidutils.is_uuid_like(id):
            msg = _("Security group id should be uuid")
            self.raise_invalid_property(msg)
        return id

    def destroy(self, context, security_group):
        """This function deletes a security group."""

        neutron = neutronapi.get_client(context)
        try:
            neutron.delete_security_group(security_group['id'])
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                self.raise_not_found(six.text_type(e))
            elif e.status_code == 409:
                self.raise_invalid_property(six.text_type(e))
            else:
                LOG.error(_LE("Neutron Error: %s"), e)
                six.reraise(*exc_info)

    def add_rules(self, context, id, name, vals):
        """Add security group rule(s) to security group.

        Note: the Nova security group API doesn't support adding multiple
        security group rules at once but the EC2 one does. Therefore,
        this function is written to support both. Multiple rules are
        installed to a security group in neutron using bulk support.
        """

        neutron = neutronapi.get_client(context)
        body = self._make_neutron_security_group_rules_list(vals)
        try:
            rules = neutron.create_security_group_rule(
                body).get('security_group_rules')
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                LOG.exception(_LE("Neutron Error getting security group %s"),
                              name)
                self.raise_not_found(six.text_type(e))
            elif e.status_code == 409:
                LOG.exception(_LE("Neutron Error adding rules to security "
                                  "group %s"), name)
                self.raise_over_quota(six.text_type(e))
            elif e.status_code == 400:
                LOG.exception(_LE("Neutron Error: %s"), six.text_type(e))
                self.raise_invalid_property(six.text_type(e))
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        converted_rules = []
        for rule in rules:
            converted_rules.append(
                self._convert_to_nova_security_group_rule_format(rule))
        return converted_rules

    def _make_neutron_security_group_dict(self, name, description):
        return {'security_group': {'name': name,
                                   'description': description}}

    def _make_neutron_security_group_rules_list(self, rules):
        new_rules = []
        for rule in rules:
            new_rule = {}
            # nova only supports ingress rules so all rules are ingress.
            new_rule['direction'] = "ingress"
            new_rule['protocol'] = rule.get('protocol')

            # FIXME(arosen) Nova does not expose ethertype on security group
            # rules. Therefore, in the case of self referential rules we
            # should probably assume they want to allow both IPv4 and IPv6.
            # Unfortunately, this would require adding two rules in neutron.
            # The reason we do not do this is because when the user using the
            # nova api wants to remove the rule we'd have to have some way to
            # know that we should delete both of these rules in neutron.
            # For now, self referential rules only support IPv4.
            if not rule.get('cidr'):
                new_rule['ethertype'] = 'IPv4'
            else:
                new_rule['ethertype'] = utils.get_ip_version(rule.get('cidr'))
            new_rule['remote_ip_prefix'] = rule.get('cidr')
            new_rule['security_group_id'] = rule.get('parent_group_id')
            new_rule['remote_group_id'] = rule.get('group_id')
            if 'from_port' in rule and rule['from_port'] != -1:
                new_rule['port_range_min'] = rule['from_port']
            if 'to_port' in rule and rule['to_port'] != -1:
                new_rule['port_range_max'] = rule['to_port']
            new_rules.append(new_rule)
        return {'security_group_rules': new_rules}

    def remove_rules(self, context, security_group, rule_ids):
        neutron = neutronapi.get_client(context)
        rule_ids = set(rule_ids)
        try:
            # The ec2 api allows one to delete multiple security group rules
            # at once. Since there is no bulk delete for neutron the best
            # thing we can do is delete the rules one by one and hope this
            # works.... :/
            for rule_id in range(0, len(rule_ids)):
                neutron.delete_security_group_rule(rule_ids.pop())
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error unable to delete %s"),
                              rule_ids)

    def get_rule(self, context, id):
        neutron = neutronapi.get_client(context)
        try:
            rule = neutron.show_security_group_rule(
                id).get('security_group_rule')
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                LOG.debug("Neutron security group rule %s not found", id)
                self.raise_not_found(six.text_type(e))
            else:
                LOG.error(_LE("Neutron Error: %s"), e)
                six.reraise(*exc_info)
        return self._convert_to_nova_security_group_rule_format(rule)

    def _get_ports_from_server_list(self, servers, neutron):
        """Returns a list of ports used by the servers."""

        def _chunk_by_ids(servers, limit):
            ids = []
            for server in servers:
                ids.append(server['id'])
                if len(ids) >= limit:
                    yield ids
                    ids = []
            if ids:
                yield ids

        # Note: Have to split the query up as the search criteria
        # form part of the URL, which has a fixed max size
        ports = []
        for ids in _chunk_by_ids(servers, MAX_SEARCH_IDS):
            search_opts = {'device_id': ids}
            ports.extend(neutron.list_ports(**search_opts).get('ports'))

        return ports

    def _get_secgroups_from_port_list(self, ports, neutron):
        """Returns a dict of security groups keyed by their ids."""

        def _chunk_by_ids(sg_ids, limit):
            sg_id_list = []
            for sg_id in sg_ids:
                sg_id_list.append(sg_id)
                if len(sg_id_list) >= limit:
                    yield sg_id_list
                    sg_id_list = []
            if sg_id_list:
                yield sg_id_list

        # Find the set of unique SecGroup IDs to search for
        sg_ids = set()
        for port in ports:
            sg_ids.update(port.get('security_groups', []))

        # Note: Have to split the query up as the search criteria
        # form part of the URL, which has a fixed max size
        security_groups = {}
        for sg_id_list in _chunk_by_ids(sg_ids, MAX_SEARCH_IDS):
            sg_search_opts = {'id': sg_id_list}
            search_results = neutron.list_security_groups(**sg_search_opts)
            for sg in search_results.get('security_groups'):
                security_groups[sg['id']] = sg

        return security_groups

    def get_instances_security_groups_bindings(self, context, servers,
                                               detailed=False):
        """Returns a dict(instance_id, [security_groups]) to allow obtaining
        all of the instances and their security groups in one shot.
        """

        neutron = neutronapi.get_client(context)

        ports = self._get_ports_from_server_list(servers, neutron)

        security_groups = self._get_secgroups_from_port_list(ports, neutron)

        instances_security_group_bindings = {}
        for port in ports:
            for port_sg_id in port.get('security_groups', []):

                # Note: have to check we found port_sg as it's possible
                # the port has an SG that this user doesn't have access to
                port_sg = security_groups.get(port_sg_id)
                if port_sg:
                    if detailed:
                        sg_entry = self._convert_to_nova_security_group_format(
                            port_sg)
                        instances_security_group_bindings.setdefault(
                            port['device_id'], []).append(sg_entry)
                    else:
                        # name is optional in neutron so if not specified
                        # return id
                        name = port_sg.get('name')
                        if not name:
                            name = port_sg.get('id')
                        sg_entry = {'name': name}
                        instances_security_group_bindings.setdefault(
                            port['device_id'], []).append(sg_entry)

        return instances_security_group_bindings

    def get_instance_security_groups(self, context, instance, detailed=False):
        """Returns the security groups that are associated with an instance.
        If detailed is True then it also returns the full details of the
        security groups associated with an instance.
        """
        servers = [{'id': instance.uuid}]
        sg_bindings = self.get_instances_security_groups_bindings(
            context, servers, detailed)
        return sg_bindings.get(instance.uuid, [])

    def _has_security_group_requirements(self, port):
        port_security_enabled = port.get('port_security_enabled', True)
        has_ip = port.get('fixed_ips')
        if has_ip:
            return port_security_enabled
        return False

    @compute_api.wrap_check_security_groups_policy
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""

        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                     " any ports") % instance.uuid)
            self.raise_not_found(msg)

        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warning(_LW("Cannot add security group %(name)s to "
                                "%(instance)s since the port %(port_id)s "
                                "does not meet security requirements"),
                            {'name': security_group_name,
                             'instance': instance.uuid,
                             'port_id': port['id']})
                raise exception.SecurityGroupCannotBeApplied()
            if 'security_groups' not in port:
                port['security_groups'] = []
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_LI("Adding security group %(security_group_id)s to "
                             "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))

    @compute_api.wrap_check_security_groups_policy
    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance."""
        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                     " any ports") % instance.uuid)
            self.raise_not_found(msg)

        found_security_group = False
        for port in ports:
            try:
                port.get('security_groups', []).remove(security_group_id)
            except ValueError:
                # When removing a security group from an instance the
                # security group should be on both ports since it was added
                # this way if done through the nova api. If it is not, a 404
                # is only raised once the security group is not found on any
                # of the ports on the instance.
                continue

            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_LI("Removing security group "
                             "%(security_group_id)s from port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
                found_security_group = True
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
        if not found_security_group:
            msg = (_("Security group %(security_group_name)s not associated "
                     "with the instance %(instance)s") %
                   {'security_group_name': security_group_name,
                    'instance': instance.uuid})
            self.raise_not_found(msg)

    def populate_security_groups(self, security_groups):
        # Returning an empty list since we do not want to populate this field
        # in the nova database if using the neutron driver
        return objects.SecurityGroupList()

    def get_default_rule(self, context, id):
        msg = _("Network driver does not support this function.")
        raise exc.HTTPNotImplemented(explanation=msg)

    def get_all_default_rules(self, context):
        msg = _("Network driver does not support this function.")
        raise exc.HTTPNotImplemented(explanation=msg)

    def add_default_rules(self, context, vals):
        msg = _("Network driver does not support this function.")
        raise exc.HTTPNotImplemented(explanation=msg)

    def remove_default_rules(self, context, rule_ids):
        msg = _("Network driver does not support this function.")
        raise exc.HTTPNotImplemented(explanation=msg)

    def default_rule_exists(self, context, values):
        msg = _("Network driver does not support this function.")
        raise exc.HTTPNotImplemented(explanation=msg)
end_unit
| 13.533752
| 359
| 0.598519
| 7,602
| 51,726
| 3.968166
| 0.05538
| 0.121726
| 0.104422
| 0.048531
| 0.814825
| 0.762381
| 0.716535
| 0.669495
| 0.609991
| 0.566333
| 0
| 0.002041
| 0.128369
| 51,726
| 3,821
| 360
| 13.537294
| 0.667036
| 0
| 0
| 0.949751
| 0
| 0.000785
| 0.667053
| 0.080849
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.000523
| 0.003926
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2b1f04bfe2228a33cee5aa9e411acc723320493e
| 169
|
py
|
Python
|
global_finprint/core/context_processors.py
|
GlobalFinPrint/global_finprint
|
8a91ceaaed42aaa716d8c9f27518ba673ebf351c
|
[
"Apache-2.0"
] | null | null | null |
global_finprint/core/context_processors.py
|
GlobalFinPrint/global_finprint
|
8a91ceaaed42aaa716d8c9f27518ba673ebf351c
|
[
"Apache-2.0"
] | 6
|
2020-06-05T18:42:32.000Z
|
2022-01-13T00:48:57.000Z
|
global_finprint/core/context_processors.py
|
GlobalFinPrint/global_finprint
|
8a91ceaaed42aaa716d8c9f27518ba673ebf351c
|
[
"Apache-2.0"
] | null | null | null |
from global_finprint.core.mixins import UserAllowedMixin
def user_allowed_processor(request):
return {'user_allowed': UserAllowedMixin.user_allowed(request.user)}
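A context processor like this only takes effect once it is listed in the Django settings. A minimal sketch of that wiring (the surrounding TEMPLATES values are assumptions for illustration; only the context_processors entry is the point here):

TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            # exposes {{ user_allowed }} in every template render
            'global_finprint.core.context_processors.user_allowed_processor',
        ],
    },
}]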
| 28.166667
| 72
| 0.828402
| 20
| 169
| 6.75
| 0.65
| 0.244444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088757
| 169
| 5
| 73
| 33.8
| 0.876623
| 0
| 0
| 0
| 0
| 0
| 0.071006
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
996ebb92d834e1c5709b8abc492539e4a9c76ae1
| 156
|
py
|
Python
|
hanibal/ans_reporte/report/__init__.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | null | null | null |
hanibal/ans_reporte/report/__init__.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | null | null | null |
hanibal/ans_reporte/report/__init__.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
import reporte_cobranza_alumnos_mensualidades
import reporte_financiero
import reporte_tutor_resumen
import reporte_gestion_cobranza
| 31.2
| 45
| 0.865385
| 19
| 156
| 6.684211
| 0.631579
| 0.409449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006944
| 0.076923
| 156
| 5
| 46
| 31.2
| 0.875
| 0.134615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99915a1c6293d2fee9b15a2227b3d2c590eef622
| 21
|
py
|
Python
|
couch/__init__.py
|
specialunderwear/tornado-couchdb
|
b1f1d15b1f52c5de124a58c1353797a3ed04cea3
|
[
"MIT"
] | null | null | null |
couch/__init__.py
|
specialunderwear/tornado-couchdb
|
b1f1d15b1f52c5de124a58c1353797a3ed04cea3
|
[
"MIT"
] | null | null | null |
couch/__init__.py
|
specialunderwear/tornado-couchdb
|
b1f1d15b1f52c5de124a58c1353797a3ed04cea3
|
[
"MIT"
] | null | null | null |
from .couch import *
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41f1c92a83a44b49e2fc8744fce947b6b03385ba
| 7,354
|
py
|
Python
|
tests/test_cli.py
|
ipadjen/cjio
|
b91ca46cab7829b21193a175c2a09de6fad93e7b
|
[
"MIT"
] | 17
|
2018-06-04T23:15:39.000Z
|
2019-10-21T15:28:45.000Z
|
tests/test_cli.py
|
ipadjen/cjio
|
b91ca46cab7829b21193a175c2a09de6fad93e7b
|
[
"MIT"
] | 25
|
2018-10-24T13:52:21.000Z
|
2019-10-28T16:30:37.000Z
|
tests/test_cli.py
|
ipadjen/cjio
|
b91ca46cab7829b21193a175c2a09de6fad93e7b
|
[
"MIT"
] | 4
|
2019-10-02T07:45:16.000Z
|
2019-10-25T00:01:39.000Z
|
import os
import os.path
from click.testing import CliRunner
import json
from cjio import cjio
class TestCLI:
def test_crs_assign_cli(self, delft_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'crs_assign.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'crs_assign', '4326',
'save',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_vertices_clean_cli(self, delft_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'clean.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'vertices_clean',
'save',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_export_obj_cli(self, delft_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'delft.obj')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'export',
'--format', 'obj',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_metadata_get_cli(self, delft_path):
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'metadata_get'])
assert result.exit_code == 0
def test_info_cli(self, delft_path):
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'info'])
assert result.exit_code == 0
def test_merge_cli(self, delft_path, rotterdam_subset_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'merge.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'merge', rotterdam_subset_path,
'save',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_attribute_remove_cli(self, delft_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'attribute_remove.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'attribute_remove', 'bgt_status',
'save',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_materials_remove_cli(self, rotterdam_subset_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'materials_remove.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[rotterdam_subset_path,
'materials_remove',
'save',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_textures_remove_cli(self, rotterdam_subset_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'textures_remove.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[rotterdam_subset_path,
'textures_remove',
'save',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_attribute_rename_cli(self, delft_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'attribute_rename.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'attribute_rename', 'hoek', 'angle',
'save',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_save_cli(self, delft_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'save.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'save', '--indent',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_crs_translate_cli(self, delft_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'crs_translate.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'crs_translate',
'--values', '-1', '-1', '-1',
'save',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_metadata_update_cli(self, delft_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'metadata_update.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'metadata_update',
'save',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_upgrade_cli(self, delft_path, data_output_dir):
p_out = os.path.join(data_output_dir, 'upgrade.json')
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'upgrade',
'save',
p_out])
assert result.exit_code == 0
assert os.path.exists(p_out) == True
os.remove(p_out)
def test_validate_cli(self, delft_path):
runner = CliRunner()
result = runner.invoke(cjio.cli,
args=[delft_path,
'validate'])
assert result.exit_code == 0
| 36.587065
| 81
| 0.458798
| 733
| 7,354
| 4.321965
| 0.085948
| 0.060606
| 0.098485
| 0.127841
| 0.851957
| 0.845328
| 0.845328
| 0.827652
| 0.827652
| 0.827652
| 0
| 0.005471
| 0.453223
| 7,354
| 200
| 82
| 36.77
| 0.782392
| 0
| 0
| 0.679487
| 0
| 0
| 0.060239
| 0.008567
| 0
| 0
| 0
| 0
| 0.173077
| 1
| 0.096154
| false
| 0
| 0.032051
| 0
| 0.134615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
41faf16276781283285b01345d74d50ee2557b26
| 3,605
|
py
|
Python
|
src/si/supervised/linreg.py
|
ALex99-bot/SIB
|
fe2bbc6ebdd39fcab8a93937d688adaee1243da3
|
[
"Apache-2.0"
] | null | null | null |
src/si/supervised/linreg.py
|
ALex99-bot/SIB
|
fe2bbc6ebdd39fcab8a93937d688adaee1243da3
|
[
"Apache-2.0"
] | null | null | null |
src/si/supervised/linreg.py
|
ALex99-bot/SIB
|
fe2bbc6ebdd39fcab8a93937d688adaee1243da3
|
[
"Apache-2.0"
] | null | null | null |
from .model import Model
from ..util.metrics import mse, accuracy_score
from ..util.util import add_intersect
import numpy as np
class LinearRegression(Model):
"""Regressão linear sem regularização"""
def __init__(self, gd=False, epochs=1000, lr=0.001):
super(LinearRegression, self).__init__()
self.gd = gd
self.theta = None
self.epochs = epochs
self.lr = lr
def fit(self, dataset):
X, y = dataset.getXy()
X = np.hstack((np.ones((X.shape[0], 1)), X))
self.X = X
self.y = y
# Closed form or GD
self.train_gd(X, y) if self.gd else self.train_closed(X, y)
self.is_fitted = True
def train_closed(self, X, y):
"""
Uses closed-form linear algebra to fit the model.
theta = inv(XT*X)*XT*y
"""
self.theta = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
def train_gd(self, X, y):
m = X.shape[0]
n = X.shape[1]
self.history = {}
self.theta = np.zeros(n)
for epoch in range(self.epochs):
grad = 1/m*(X.dot(self.theta)-y).dot(X)
self.theta -= self.lr*grad
self.history[epoch] = [self.theta[:], self.cost()]
def predict(self, x):
assert self.is_fitted, 'Model must be fit before predicting'
_x = np.hstack(([1], x))
return np.dot(self.theta, _x)
def cost(self, X=None, y=None, theta=None):
X = add_intersect(X) if X is not None else self.X
y = y if y is not None else self.y
theta = theta if theta is not None else self.theta
y_pred = np.dot(X, theta)
return mse(y, y_pred)/2
class LinearRegressionReg(LinearRegression):
"""Regressão linear com regularização"""
def __init__(self, gd=False, epochs=1000, lr=0.001, lbd=1):
"""
Linear regression model with L2 regularization
"""
super(LinearRegressionReg, self).__init__(gd=gd, epochs=epochs, lr=lr)
self.gd = gd
self.theta = None
self.epochs = epochs
self.lr = lr
self.lbd = lbd
def train_closed(self, X, y):
"""
Uses closed form linear algebra to fit the model
theta = inv(XT*X+lbd*I)*XT*y
"""
n = X.shape[1]
identity = np.eye(n)
identity[0, 0] = 0
self.theta = np.linalg.inv(X.T.dot(X)+self.lbd*identity).dot(X.T).dot(y)
def train_gd(self, X, y):
"""
Uses gradient descent to fit the model.
"""
m = X.shape[0]
n = X.shape[1]
self.history = {}
self.theta = np.zeros(n)
lbds = np.full(m, self.lbd)
lbds[0] = 0
for epoch in range(self.epochs):
grad = (X.dot(self.theta)-y).dot(X)
self.theta -= (self.lr/m)*(lbds+grad)
self.history[epoch] = [self.theta[:], self.cost()]
def fit(self, dataset):
X, y = dataset.getXy()
X = np.hstack((np.ones((X.shape[0], 1)), X))
self.X = X
self.y = y
# Closed form or GD
self.train_gd(X, y) if self.gd else self.train_closed(X, y)
self.is_fitted = True
def predict(self, x):
assert self.is_fitted, 'Model must be fit before predicting'
_x = np.hstack(([1], x))
return np.dot(self.theta, _x)
def cost(self, X=None, y=None, theta=None):
X = add_intersect(X) if X is not None else self.X
y = y if y is not None else self.y
theta = theta if theta is not None else self.theta
y_pred = np.dot(X, theta)
return mse(y, y_pred)/2
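The train_closed method above is just the normal equation theta = inv(X^T X) X^T y. A minimal standalone check with plain numpy (the toy data below is an assumption for illustration, not part of the original module):

import numpy as np

# Fit y = 2x + 1; the leading ones column is the bias term,
# mirroring the np.hstack call in fit().
X = np.hstack((np.ones((5, 1)), np.arange(5.0).reshape(-1, 1)))
y = 2 * np.arange(5.0) + 1
theta = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
print(theta)  # approximately [1. 2.]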
| 31.077586
| 80
| 0.554785
| 553
| 3,605
| 3.544304
| 0.168174
| 0.073469
| 0.018367
| 0.039796
| 0.761224
| 0.761224
| 0.761224
| 0.731633
| 0.702551
| 0.638265
| 0
| 0.014865
| 0.30957
| 3,605
| 115
| 81
| 31.347826
| 0.772599
| 0.095146
| 0
| 0.7375
| 0
| 0
| 0.022286
| 0
| 0
| 0
| 0
| 0
| 0.025
| 1
| 0.15
| false
| 0
| 0.05
| 0
| 0.275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
513a79328f993b1c234148a0782d02cbbf7a4c3f
| 334
|
py
|
Python
|
cride/rides/permissions/rides.py
|
jesusRL96/curso_platzi_django_adv
|
870596b1ba7285a25777e14c031c9026a5d0f754
|
[
"MIT"
] | null | null | null |
cride/rides/permissions/rides.py
|
jesusRL96/curso_platzi_django_adv
|
870596b1ba7285a25777e14c031c9026a5d0f754
|
[
"MIT"
] | null | null | null |
cride/rides/permissions/rides.py
|
jesusRL96/curso_platzi_django_adv
|
870596b1ba7285a25777e14c031c9026a5d0f754
|
[
"MIT"
] | null | null | null |
from rest_framework.permissions import BasePermission
class IsRideOwner(BasePermission):
def has_object_permission(self, request, view, obj):
return request.user == obj.offered_by
class IsNotRideOwner(BasePermission):
def has_object_permission(self, request, view, obj):
return request.user != obj.offered_by
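These object-level permissions are meant to be plugged into a DRF view. A hypothetical sketch of that usage (the RideViewSet name and action mapping are assumptions, not part of the original module):

from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated

from cride.rides.permissions.rides import IsRideOwner

class RideViewSet(viewsets.ModelViewSet):
    def get_permissions(self):
        # Only the ride owner may edit or delete the ride.
        permissions = [IsAuthenticated]
        if self.action in ('update', 'partial_update', 'destroy'):
            permissions.append(IsRideOwner)
        return [p() for p in permissions]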
| 37.111111
| 56
| 0.766467
| 40
| 334
| 6.225
| 0.525
| 0.136546
| 0.160643
| 0.208835
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0
| 0
| 0.152695
| 334
| 9
| 57
| 37.111111
| 0.879859
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.285714
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
514c58b4f5d3a3aca6ca2d91fe4716c70c50af31
| 31
|
py
|
Python
|
kamal/slim/distillation/data_free/__init__.py
|
zju-vipa/KamalEngine
|
0276eb062595d52472090fbcbcedcd76db8cfd44
|
[
"Apache-2.0"
] | 79
|
2019-07-04T11:19:31.000Z
|
2022-03-24T13:32:29.000Z
|
kamal/slim/distillation/data_free/__init__.py
|
zju-vipa/KamalEngine
|
0276eb062595d52472090fbcbcedcd76db8cfd44
|
[
"Apache-2.0"
] | 4
|
2019-09-08T13:20:52.000Z
|
2021-06-15T12:07:37.000Z
|
kamal/slim/distillation/data_free/__init__.py
|
zju-vipa/KamalEngine
|
0276eb062595d52472090fbcbcedcd76db8cfd44
|
[
"Apache-2.0"
] | 17
|
2019-07-23T09:48:45.000Z
|
2022-03-14T03:19:40.000Z
|
from .zskt import ZSKTDistiller
| 31
| 31
| 0.870968
| 4
| 31
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5152035a3cfa61a9160d6c04fc2f5d2f2a34570b
| 177
|
py
|
Python
|
phonebooks_api/admin.py
|
Kien676/phonebooks-rest-api
|
62556bd5f9cdfc3216c38672f80a7fa444407e27
|
[
"MIT"
] | null | null | null |
phonebooks_api/admin.py
|
Kien676/phonebooks-rest-api
|
62556bd5f9cdfc3216c38672f80a7fa444407e27
|
[
"MIT"
] | null | null | null |
phonebooks_api/admin.py
|
Kien676/phonebooks-rest-api
|
62556bd5f9cdfc3216c38672f80a7fa444407e27
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from phonebooks_api import models
# Register your models here.
admin.site.register(models.Phonebook)
admin.site.register(models.userPhonebook)
| 25.285714
| 41
| 0.836158
| 24
| 177
| 6.125
| 0.583333
| 0.122449
| 0.231293
| 0.312925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090395
| 177
| 6
| 42
| 29.5
| 0.913043
| 0.146893
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5152c0010d1021ef61baa122971c8fe238728281
| 251
|
py
|
Python
|
lldb/packages/Python/lldbsuite/test/lang/cpp/thread_local/TestThreadLocal.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 456
|
2015-01-15T08:44:39.000Z
|
2022-03-31T22:34:57.000Z
|
lldb/packages/Python/lldbsuite/test/lang/cpp/thread_local/TestThreadLocal.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 14
|
2020-02-03T23:39:51.000Z
|
2021-07-20T16:24:25.000Z
|
lldb/packages/Python/lldbsuite/test/lang/cpp/thread_local/TestThreadLocal.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 254
|
2015-01-19T21:53:39.000Z
|
2022-02-23T19:38:56.000Z
|
from lldbsuite.test import lldbinline
from lldbsuite.test import decorators
lldbinline.MakeInlineTest(__file__, globals(),
lldbinline.expectedFailureAll(oslist=[
"windows", "linux", "netbsd"]))
| 35.857143
| 64
| 0.625498
| 20
| 251
| 7.65
| 0.7
| 0.169935
| 0.222222
| 0.300654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.282869
| 251
| 6
| 65
| 41.833333
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0.071713
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5ae622b52873b04e33d18a92a511100868eafa8f
| 151
|
py
|
Python
|
shub/version.py
|
PyExplorer/shub
|
dc38191e6593f3c012cb89ed1551f8b0dd2981d8
|
[
"BSD-3-Clause"
] | 111
|
2015-02-05T15:24:15.000Z
|
2022-03-31T03:31:22.000Z
|
shub/version.py
|
PyExplorer/shub
|
dc38191e6593f3c012cb89ed1551f8b0dd2981d8
|
[
"BSD-3-Clause"
] | 355
|
2015-01-01T16:18:46.000Z
|
2022-03-18T15:41:10.000Z
|
shub/version.py
|
PyExplorer/shub
|
dc38191e6593f3c012cb89ed1551f8b0dd2981d8
|
[
"BSD-3-Clause"
] | 79
|
2015-02-23T17:07:32.000Z
|
2022-01-03T09:15:39.000Z
|
from __future__ import absolute_import
import click
import shub
@click.command(help="Show shub version")
def cli():
click.echo(shub.__version__)
| 16.777778
| 40
| 0.774834
| 21
| 151
| 5.142857
| 0.619048
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13245
| 151
| 8
| 41
| 18.875
| 0.824427
| 0
| 0
| 0
| 0
| 0
| 0.112583
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.5
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8507986520923c583195266f9135f1236808d5d5
| 1,853
|
py
|
Python
|
tests/test_Task04.py
|
AlekseiPrivalihin/FormalLanguageTheory
|
83f229122afb07172780443fec00f837eed6e0ab
|
[
"Apache-2.0"
] | null | null | null |
tests/test_Task04.py
|
AlekseiPrivalihin/FormalLanguageTheory
|
83f229122afb07172780443fec00f837eed6e0ab
|
[
"Apache-2.0"
] | 4
|
2020-09-17T08:41:15.000Z
|
2020-12-25T08:56:50.000Z
|
tests/test_Task04.py
|
AlekseiPrivalihin/FormalLanguageTheory
|
83f229122afb07172780443fec00f837eed6e0ab
|
[
"Apache-2.0"
] | null | null | null |
from Graph import Graph
from ContextFreeGrammar import ChomskyNormalForm as CNF
from pyformlang.cfg import *
def test_from_file():
gr = CNF.from_file("cfg_input.txt")
word_accepted = list(map(Terminal, 'aaba'))
word_declined = list(map(Terminal, 'aabb'))
assert gr.contains(word_accepted)
assert not gr.contains([])
assert not gr.contains(word_declined)
def test_from_file_with_eps():
gr = CNF.from_file("cfg_eps_input.txt")
word_accepted = list(map(Terminal, 'aaba'))
word_declined = list(map(Terminal, 'aabb'))
assert gr.contains(word_accepted)
assert gr.contains([])
assert not gr.contains(word_declined)
def test_CYK():
gr = CNF.from_file("cfg_input.txt")
assert gr.CYK('ab')
assert gr.CYK('aaba')
assert not gr.CYK('')
assert not gr.CYK('abc')
def test_CYK_with_eps():
gr = CNF.from_file("cfg_eps_input.txt")
assert gr.CYK('ab')
assert gr.CYK('aaba')
assert gr.CYK('')
assert not gr.CYK('abc')
def test_Hellings():
gr = CNF.from_file("cfg_input.txt")
g = Graph()
g.from_file("input4.txt")
reachable = frozenset(gr.Hellings(g))
assert reachable == {(0, 2), (2, 0), (0, 0), (2, 1), (0, 1)}
def test_Hellings_empty_graph():
gr = CNF.from_file("cfg_eps_input.txt")
g = Graph()
g.from_file("empty_input.txt")
reachable = frozenset(gr.Hellings(g))
assert reachable == frozenset()
def test_Hellings_empty_grammar():
gr = CNF.from_file("empty_input.txt")
g = Graph()
g.from_file("input.txt")
reachable = frozenset(gr.Hellings(g))
assert reachable == frozenset()
def test_Hellings_eps():
gr = CNF.from_file("cfg_eps_input.txt")
g = Graph()
g.from_file("input4.txt")
reachable = frozenset(gr.Hellings(g))
assert reachable == {(0, 0), (0, 1), (0, 2), (1, 1), (2, 0), (2, 1), (2, 2)}
| 29.887097
| 80
| 0.652455
| 278
| 1,853
| 4.161871
| 0.147482
| 0.096802
| 0.06223
| 0.089888
| 0.827139
| 0.811582
| 0.811582
| 0.750216
| 0.750216
| 0.697494
| 0
| 0.017253
| 0.186724
| 1,853
| 61
| 81
| 30.377049
| 0.750498
| 0
| 0
| 0.622642
| 0
| 0
| 0.107933
| 0
| 0
| 0
| 0
| 0
| 0.339623
| 1
| 0.150943
| false
| 0
| 0.056604
| 0
| 0.207547
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 8512633cda730eddda5f027471726f40c343bf01
| 449
| py
| Python
| src/bitprint.py
| smorin/bitprint
| 9116da2812860e27a9fc65a24064e536d3316185
| ["MIT"] | null | null | null
| src/bitprint.py
| smorin/bitprint
| 9116da2812860e27a9fc65a24064e536d3316185
| ["MIT"] | null | null | null
| src/bitprint.py
| smorin/bitprint
| 9116da2812860e27a9fc65a24064e536d3316185
| ["MIT"] | null | null | null
def get_binary_rep(data, spacing=0, separator=" "):
    # Join the 8-bit binary form of each value in `data` (assumes an iterable
    # of ints, e.g. a bytes object). The original body was a bare expression
    # over an undefined `i`; returning the joined string is an assumed intent.
    # (`spacing` is accepted but unused, as in the original.)
    return separator.join(format(i, 'b').zfill(8) for i in data)

def bin_rep_string_arr(data):
    # 8-bit binary string per character code point. The original bare `map()`
    # call was a TypeError; this comprehension is an assumed completion.
    return [format(ord(c), 'b').zfill(8) for c in data]

# The remaining converters are unimplemented placeholders in the source.
def bin_rep_int_arr(data):
    return []

def bin_rep_unicode_arr(data):
    return []

def bin_rep_bytes_arr(data):
    return []

def hex_rep_string_arr(data):
    return []

def hex_rep_int_arr(data):
    return []

def hex_rep_unicode_arr(data):
    return []

def hex_rep_bytes_arr(data):
    return []
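# Usage sketch (assumed, not from the original file): pass an iterable of
# byte values, e.g. a bytes object.
# get_binary_rep(b"Hi")  ->  "01001000 01101001"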
| 13.205882
| 51
| 0.66147
| 70
| 449
| 3.871429
| 0.314286
| 0.206642
| 0.335793
| 0.354244
| 0.656827
| 0.571956
| 0
| 0
| 0
| 0
| 0
| 0.005618
| 0.207127
| 449
| 34
| 52
| 13.205882
| 0.755618
| 0
| 0
| 0.421053
| 0
| 0
| 0.004484
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.473684
| false
| 0
| 0
| 0.368421
| 0.894737
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 6
| 518fd80ec3eead005ab935d9078fbcf94556ba49
| 201
| py
| Python
| python_modules/dagstermill/dagstermill/errors.py
| bambielli-flex/dagster
| 30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
| ["Apache-2.0"] | null | null | null
| python_modules/dagstermill/dagstermill/errors.py
| bambielli-flex/dagster
| 30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
| ["Apache-2.0"] | null | null | null
| python_modules/dagstermill/dagstermill/errors.py
| bambielli-flex/dagster
| 30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
| ["Apache-2.0"] | null | null | null
from dagster.core.errors import DagsterUserCodeExecutionError

class DagstermillError(Exception):
    pass

class DagstermillExecutionError(DagstermillError, DagsterUserCodeExecutionError):
    pass
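# Illustrative note (an assumption, not from the original file):
# DagstermillError is a plain Exception subclass, so callers can catch
# dagstermill failures without depending on dagster's own error types:
# try:
#     ...
# except DagstermillError:
#     ...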
| 20.1
| 81
| 0.840796
| 15
| 201
| 11.266667
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114428
| 201
| 9
| 82
| 22.333333
| 0.949438
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 6
| 51aebc0554fb94f28a954001baec84f2ed48d14e
| 40,873
| py
| Python
| fernan/ans.py
| Ariel96cs/SimpleMapReduceFramework
| 7cf482f94b1ed1e6436a7b229dfe09c3c0906d56
| ["MIT"] | null | null | null
| fernan/ans.py
| Ariel96cs/SimpleMapReduceFramework
| 7cf482f94b1ed1e6436a7b229dfe09c3c0906d56
| ["MIT"] | null | null | null
| fernan/ans.py
| Ariel96cs/SimpleMapReduceFramework
| 7cf482f94b1ed1e6436a7b229dfe09c3c0906d56
| ["MIT"] | null | null | null
s = [('republishes', True), ('allem', True), ('Soon', True), ('low', True), ('difficult', True), ('simply', True), ('lost', True), ('FindNodeMessages', True), ('removing', True), ('hierarchy', True), ('respond', True), ('original', True), ('expiry', True), ('henceforth', True), ('random', True), ('scheduling', True), ('22', True), ('Updating', True), ('02', True), ('getting', True), ('allocated', True), ('Procedure', True), ('points', True), ('modes', True), ('allowed', True), ('bevor', True), ('justification', True), ('theoretical', True), ('happens', True), ('procedures', True), ('necessary', True), ('One', True), ('extremes', True), ('popular', True), ('ordering', True), ('along', True), ('Celeste', True), ('metric', True), ('reason', True), ('If', True), ('Daniel', True), ('Specialties', True), ('example', True), ('www', True), ('file', True), ('mechanism', True), ('Very', True), ('PingRe', True), ('division', True), ('inversely', True), ('usual', True), ('searched', True), ('goal', True), ('ReplyingThread', True), ('here', True), ('derived', True), ('Thread', True), ('burdens', True), ('learn', True), ('Otherwise', True), ('prices', True), ('An', True), ('remains', True), ('correct', True), ('so', True), ('Hence', True), ('self', True), ('serious', True), ('implicit', True), ('highly', True), ('delegated', True), ('buffers', True), ('method', True), ('required', True), ('its', True), ('subject', True), ('based', True), ('sorted', True), ('0111', True), ('missing', True), ('inter', True), ('263', True), ('index', True), ('restriction', True), ('Function', True), ('options', True), ('Ling', True), ('Programmes', True), ('somewhere', True), ('STORE', True), ('remarks', True), ('randomly', True), ('Bucket', True), ('selected', True), ('time', True), ('methods', True), ('puter', True), ('push', True), ('passes', True), ('echo', True), ('eingesetzt', True), ('During', True), ('Other', True), ('reveal', True), ('age', True), ('Korrektheit', True), ('Whenever', True), ('store', True), ('24Chapter', True), ('constructor', True), ('possibly', True), ('located', True), ('proved', True), ('element', True), ('File', True), ('applicable', True), ('Binary', True), ('parameterizable', True), ('choices', True), ('course', True), ('empty', True), ('NodeReplyBuffer', True), ('ability', True), ('VALUE', True), ('performance', True), ('tried', True), ('DatagramSocket', True), ('highest', True), ('run', True), ('become', True), ('inconvenient', True), ('Those', True), ('add', True), ('Newly', True), ('eines', True), ('scalable', True), ('separating', True), ('calling', True), ('30', True), ('by', True), ('most', True), ('Two', True), ('simplify', True), ('Resilient', True), ('string', True), ('Um', True), ('validity', True), ('packet', True), ('100TB', True), ('They', True), ('address', True), ('occurred', True), ('discovered', True), ('probably', True), ('1101', True), ('price', True), ('tree', True), ('extent', True), ('expired', True), ('Quick', True), ('between', True), ('measurements', True), ('becomes', True), ('imported', True), ('Lookup', True), ('explain', True), ('clarify', True), ('Fortunately', True), ('finishing', True), ('queried', True), ('shows', True), ('creation', True), ('At', True), ('replied', True), ('scratch', True), ('overhead', True), ('need', True), ('leakage', True), ('Dieser', True), ('Out', True), ('Reasons', True), ('a', True), ('k', True), ('Addi', True), ('on', True), ('distances', True), ('exception', True), ('fact', True), ('assuming', True), ('more', True), ('xored', 
True), ('HashSets', True), ('treat', True), ('fills', True), ('Maymounkov', True), ('parameter', True), ('StoreMessage', True), ('waits', True), ('2002', True), ('represented', True), ('create', True), ('nature', True), ('efficient', True), ('what', True), ('pinging', True), ('assigend', True), ('uses', True), ('Joseph', True), ('return', True), ('reaction', True), ('cell', True), ('replicated', True), ('obtain', True), ('x', True), ('com', True), ('integration', True), ('handling', True), ('approximately', True), ('similarities', True), ('summed', True), ('stead', True), ('about', True), ('assume', True), ('Mess', True), ('preferred', True), ('participate', True), ('jlint', True), ('therefore', True), ('guaranteed', True), ('action', True), ('net', True), ('requirements', True), ('disabled', True), ('distinction', True), ('names', True), ('thought', True), ('adding', True), ('clients', True), ('Professor', True), ('15As', True), ('application', True), ('hierarchical', True), ('former', True), ('Zhao', True), ('Design', True), ('fields', True), ('Stores', True), ('profits', True), ('XOR', True), ('two', True), ('similar', True), ('echoed', True), ('else', True), ('update', True), ('Programming', True), ('NodeReplyMessages', True), ('improvements', True), ('implementa', True), ('bookkeeping', True), ('Spain', True), ('Priority', True), ('join', True), ('closest', True), ('para', True), ('false', True), ('est', True), ('apply', True), ('generated', True), ('named', True), ('IP', True), ('task', True), ('signed', True), ('steht', True), ('computers', True), ('Maybe', True), ('Introduction', True), ('remember', True), ('running', True), ('David', True), ('109', True), ('ascending', True), ('longer', True), ('tempted', True), ('desired', True), ('183', True), ('get', True), ('Republishing', True), ('positional', True), ('wasted', True), ('17', True), ('143', True), ('enumeration', True), ('maximal', True), ('implies', True), ('feasible', True), ('Of', True), ('answers', True), ('Die', True), ('Java', True), ('INFOCOM', True), ('stable', True), ('strong', True), ('Gummadi', True), ('general', True), ('RPCs', True), ('routing', True), ('target', True), ('Extraction', True), ('chapter', True), ('added', True), ('introduces', True), ('29', True), ('RefresherEntrys', True), ('describes', True), ('Stefan', True), ('argument', True), ('reusing', True), ('flag', True), ('contained', True), ('represents', True), ('Closest', True), ('actually', True), ('organizing', True), ('Notice', True), ('smallest', True), ('generation', True), ('31Chapter', True), ('terminated', True), ('Distributed', True), ('ReceivingThread', True), ('tem', True), ('Central', True), ('calculate', True), ('data', True), ('offers', True), ('updated', True), ('Intermediate', True), ('NODE', True), ('Besides', True), ('frequent', True), ('share', True), ('differences', True), ('greater', True), ('additional', True), ('Scott', True), ('whenever', True), ('total', True), ('Steven', True), ('quickly', True), ('indicating', True), ('Messa', True), ('From', True), ('changes', True), ('due', True), ('Thesis', True), ('representing', True), ('z', True), ('Baur', True), ('though', True), ('No', True), ('no', True), ('exchanged', True), ('Buffers', True), ('Stutzbach', True), ('context', True), ('sense', True), ('search', True), ('According', True), ('prolong', True), ('But', True), ('make', True), ('Implementing', True), ('properties', True), ('executed', True), ('entry', True), ('M', True), ('mostly', True), ('capacities', True), 
('Specifically', True), ('continue', True), ('assurance', True), ('maintain', True), ('interfaces', True), ('during', True), ('faults', True), ('14', True), ('policy', True), ('Location', True), ('Receiving', True), ('gewöhnliche', True), ('Sys', True), ('Rejaie', True), ('tional', True), ('does', True), ('subtree', True), ('disadvantage', True), ('decide', True), ('Zurich', True), ('caching', True), ('full', True), ('Static', True), ('intended', True), ('signifies', True), ('IPTPS02', True), ('repositories', True), ('step', True), ('countermea', True), ('problems', True), ('accom', True), ('differentiation', True), ('Distinguishing', True), ('Thus', True), ('30the', True), ('functioning', True), ('works', True), ('PingReplyMessage', True), ('applications', True), ('now', True), ('activity', True), ('explicit', True), ('provide', True), ('nodesEnquired', True), ('stores', True), ('continues', True), ('life', True), ('to', True), ('Message', True), ('node', True), ('Kademlia', True), ('terminates', True), ('klar', True), ('Thanks', True), ('refreshed', True), ('1001110110000101', True), ('Rounds', True), ('cycle', True), ('come', True), ('nearly', True), ('circumstance', True), ('split', True), ('Offloading', True), ('Computing', True), ('DOLR', True), ('responded', True), ('put', True), ('filling', True), ('working', True), ('Glenn', True), ('Zusammenhang', True), ('Tapestry', True), ('meantime', True), ('publishes', True), ('retrieval', True), ('Protection', True), ('contrast', True), ('is', True), ('06', True), ('executes', True), ('student', True), ('addReceivedMessage', True), ('parts', True), ('John', True), ('Computer', True), ('implement', True), ('Peer', True), ('timeouts', True), ('year', True), ('NodeReply', True), ('getFirstMessage', True), ('above', True), ('slightly', True), ('Repräsentation', True), ('needs', True), ('Marcel', True), ('opportunity', True), ('Reza', True), ('procedure', True), ('java', True), ('unreliability', True), ('virtual', True), ('interesting', True), ('turned', True), ('adds', True), ('Depending', True), ('always', True), ('Both', True), ('cf', True), ('redundant', True), ('ids', True), ('Query', True), ('indeed', True), ('Request', True), ('event', True), ('proposed', True), ('knows', True), ('Implementation', True), ('arrived', True), ('heavy', True), ('other', True), ('Study', True), ('ensured', True), ('protecting', True), ('earlier', True), ('matching', True), ('hour', True), ('Concept', True), ('Storage', True), ('prefix', True), ('phase', True), ('failed', True), ('reserved', True), ('Alternative', True), ('check', True), ('operate', True), ('lower', True), ('local', True), ('intervals', True), ('20', True), ('times', True), ('Non', True), ('Because', True), ('existing', True), ('Reasonably', True), ('follow', True), ('choice', True), ('planet', True), ('possible', True), ('arrive', True), ('RequestMessage', True), ('Replying', True), ('All', True), ('Pin', True), ('ready', True), ('offloaded', True), ('conclude', True), ('paar', True), ('once', True), ('resistance', True), ('an', True), ('described', True), ('abandoning', True), ('Several', True), ('1110', True), ('already', True), ('imposed', True), ('fail', True), ('reception', True), ('bestehenden', True), ('at', True), ('Engineering', True), ('continued', True), ('Concurrent', True), ('triple', True), ('Store', True), ('Assuming', True), ('Losers', True), ('separate', True), ('relies', True), ('PingReplyMessages', True), ('capacity', True), ('Need', True), ('version', True), ('takes', 
True), ('Course', True), ('PingRepReqMessage', True), ('refreshing', True), ('map', True), ('c', True), ('Similarly', True), ('way', True), ('specifying', True), ('allowing', True), ('setRec', True), ('lists', True), ('saves', True), ('referred', True), ('Vol', True), ('R', True), ('consist', True), ('late', True), ('O', True), ('avoided', True), ('reaches', True), ('List', True), ('Remind', True), ('composed', True), ('Format', True), ('payload', True), ('Overlay', True), ('process', True), ('straight', True), ('authors', True), ('sicherzustellen', True), ('socket', True), ('5', True), ('carries', True), ('pay', True), ('according', True), ('Information', True), ('Certain', True), ('inserted', True), ('included', True), ('35', True), ('participating', True), ('addi', True), ('joining', True), ('sucht', True), ('d', True), ('fake', True), ('This', True), ('following', True), ('makes', True), ('concrete', True), ('amount', True), ('request', True), ('pings', True), ('Always', True), ('includes', True), ('Replies', True), ('main', True), ('results', True), ('turn', True), ('path', True), ('0101', True), ('part', True), ('switching', True), ('ation', True), ('one', True), ('network', True), ('all', True), ('test', True), ('Data', True), ('I', True), ('currently', True), ('shall', True), ('exchange', True), ('keeps', True), ('state', True), ('where', True), ('Looked', True), ('While', True), ('Conversely', True), ('actual', True), ('administrative', True), ('remainder', True), ('supplied', True), ('temporary', True), ('accomplished', True), ('Hosts', True), ('Berkeley', True), ('even', True), ('infocom06', True), ('list', True), ('Used', True), ('supported', True), ('interrupt', True), ('relied', True), ('wurden', True), ('33', True), ('eventually', True), ('illustrated', True), ('first', True), ('lot', True), ('reasonable', True), ('involved', True), ('remove', True), ('suitable', True), ('PUB', True), ('solution', True), ('those', True), ('certain', True), ('suggest', True), ('and', True), ('pairs', True), ('Critical', True), ('100K', True), ('looks', True), ('keeping', True), ('illustration', True), ('um', True), ('collector', True), ('publishing', True), ('afterwards', True), ('fulfilled', True), ('merging', True), ('of', True), ('connected', True), ('basic', True), ('structures', True), ('package', True), ('increases', True), ('position', True), ('Advisor', True), ('threaded', True), ('alive', True), ('equivalent', True), ('propery', True), ('arbitrary', True), ('determine', True), ('fresh', True), ('Providing', True), ('10above', True), ('sufficient', True), ('valuable', True), ('http', True), ('initially', True), ('gigabyte', True), ('internally', True), ('standing', True), ('near', True), ('option', True), ('launched', True), ('out', True), ('below', True), ('wide', True), ('itself', True), ('PlattnerAbstract', True), ('continuation', True), ('soll', True), ('There', True), ('determined', True), ('suffices', True), ('Discovering', True), ('platform', True), ('comprises', True), ('severe', True), ('whether', True), ('fit', True), ('providing', True), ('quest', True), ('Translating', True), ('des', True), ('Spori', True), ('policies', True), ('may', True), ('type', True), ('often', True), ('clearly', True), ('i', True), ('That', True), ('employee', True), ('prone', True), ('had', True), ('another', True), ('means', True), ('RequestMessages', True), ('pinged', True), ('connection', True), ('receives', True), ('verschiedene', True), ('description', True), ('size', True), ('Sharing', True), 
('large', True), ('operation', True), ('TCP', True), ('increasing', True), ('wait', True), ('port', True), ('triggered', True), ('influence', True), ('Recurring', True), ('match', True), ('protocol', True), ('bucket', True), ('Together', True), ('cases', True), ('then', True), ('receiving', True), ('112', True), ('TreeMap', True), ('19message', True), ('identical', True), ('complete', True), ('Explicit', True), ('Table', True), ('Conclusions', True), ('when', True), ('Bernhard', True), ('bugs', True), ('frequently', True), ('management', True), ('responses', True), ('getMessage', True), ('Right', True), ('back', True), ('FindNode', True), ('circumstances', True), ('such', True), ('different', True), ('emphasis', True), ('choose', True), ('PingRepReq', True), ('specifications', True), ('analyzed', True), ('rescheduling', True), ('establishment', True), ('graphs', True), ('sends', True), ('Most', True), ('carried', True), ('place', True), ('Internet', True), ('closer', True), ('networking', True), ('tested', True), ('introduced', True), ('source', True), ('PingReplyBuffer', True), ('stands', True), ('proper', True), ('uoregon', True), ('expected', True), ('0110', True), ('remain', True), ('Or', True), ('hosts', True), ('ing', True), ('happen', True), ('Summer', True), ('semester', True), ('result', True), ('this', True), ('assumption', True), ('situations', True), ('property', True), ('dimensional', True), ('pair', True), ('Between', True), ('Gribble', True), ('probability', True), ('representation', True), ('suspend', True), ('reasons', True), ('10', True), ('20Thus', True), ('round', True), ('removed', True), ('loss', True), ('arose', True), ('Key', True), ('replicate', True), ('care', True), ('sending', True), ('do', True), ('opposed', True), ('object', True), ('topology', True), ('Therewith', True), ('behind', True), ('mulit', True), ('some', True), ('error', True), ('higher', True), ('wanted', True), ('queue', True), ('Raphael', True), ('Work', True), ('slower', True), ('unless', True), ('3', True), ('individually', True), ('use', True), ('consists', True), ('Concretely', True), ('1010', True), ('directly', True), ('yields', True), ('together', True), ('wants', True), ('Selection', True), ('accept', True), ('gateway', True), ('accepts', True), ('23position', True), ('ReplyingThreads', True), ('locating', True), ('drawbacks', True), ('informa', True), ('know', True), ('violate', True), ('conversion', True), ('top', True), ('2001', True), ('Sean', True), ('why', True), ('Receiver', True), ('LinkedList', True), ('employs', True), ('requests', True), ('justify', True), ('8', True), ('Differences', True), ('110', True), ('being', True), ('new', True), ('0', True), ('machine', True), ('calculated', True), ('shortly', True), ('Once', True), ('Im', True), ('particularities', True), ('received', True), ('true', True), ('types', True), ('explains', True), ('written', True), ('documented', True), ('call', True), ('distinguish', True), ('Unfortunately', True), ('January', True), ('Extract', True), ('constant', True), ('computer', True), ('Semester', True), ('thread', True), ('partition', True), ('who', True), ('Further', True), ('checks', True), ('carrying', True), ('Integration', True), ('Future', True), ('1', True), ('introduce', True), ('13representing', True), ('mind', True), ('retrieves', True), ('exists', True), ('practical', True), ('optimal', True), ('Method', True), ('the', True), ('Bibliography', True), ('reached', True), ('could', True), ('buffered', True), ('Deployment', True), 
('differ', True), ('enables', True), ('important', True), ('suggests', True), ('8192', True), ('cached', True), ('key', True), ('variable', True), ('Important', True), ('extracted', True), ('System', True), ('Introducing', True), ('manner', True), ('consistency', True), ('Constructing', True), ('Saroiu', True), ('attacks', True), ('Improving', True), ('UC', True), ('look', True), ('pRe', True), ('storing', True), ('PublishedValuesRefresherEntry', True), ('Lookups', True), ('Lists', True), ('Automatic', True), ('trade', True), ('Demultiple', True), ('Structure', True), ('structure', True), ('through', True), ('Jeremy', True), ('0011', True), ('appears', True), ('entries', True), ('contacts', True), ('Id', True), ('problem', True), ('2Contents', True), ('moved', True), ('Tool', True), ('werden', True), ('solutions', True), ('lowest', True), ('ReplyMessage', True), ('newly', True), ('1111', True), ('serve', True), ('α', True), ('Specifications', True), ('used', True), ('The', True), ('Deployed', True), ('old', True), ('field', True), ('Node', True), ('checkMessage', True), ('timeout', True), ('correctly', True), ('Becomes', True), ('Proceedings', True), ('replication', True), ('More', True), ('Topology', True), ('insight', True), ('KeyValueRefresherEntry', True), ('PingRequestMessage', True), ('implicitly', True), ('Bruno', True), ('occur', True), ('vital', True), ('loosers', True), ('places', True), ('A', True), ('visited', True), ('returns', True), ('consider', True), ('users', True), ('einem', True), ('eply', True), ('Instead', True), ('implemented', True), ('common', True), ('complexity', True), ('or', True), ('extensions', True), ('each', True), ('explained', True), ('Value', True), ('executing', True), ('good', True), ('dedicated', True), ('dimension', True), ('Architecture', True), ('forward', True), ('MA', True), ('retains', True), ('arguments', True), ('Reaction', True), ('concurrent', True), ('replicating', True), ('reinserted', True), ('1001', True), ('decision', True), ('concepts', True), ('von', True), ('objects', True), ('respectively', True), ('1011', True), ('piggy', True), ('difference', True), ('dependent', True), ('slice', True), ('proposes', True), ('constraints', True), ('case', True), ('21used', True), ('tasks', True), ('2006', True), ('chosen', True), ('evaluates', True), ('duplicate', True), ('routed', True), ('costs', True), ('events', True), ('retrieved', True), ('Periodic', True), ('celeste_', True), ('regardless', True), ('6', True), ('simple', True), ('Imagine', True), ('them', True), ('reset', True), ('byte', True), ('design', True), ('various', True), ('informations', True), ('space', True), ('blocked', True), ('drawback', True), ('forwarded', True), ('overview', True), ('host', True), ('Measurement', True), ('tests', True), ('garbage', True), ('Runtime', True), ('cover', True), ('comes', True), ('q', True), ('successful', True), ('performed', True), ('contents', True), ('Buckets', True), ('equest', True), ('active', True), ('user', True), ('programming', True), ('2', True), ('drops', True), ('drives', True), ('give', True), ('CSE', True), ('number', True), ('Every', True), ('blog', True), ('enable', True), ('decisions', True), ('choosing', True), ('Speicherungs', True), ('xer', True), ('there', True), ('lead', True), ('bit', True), ('considered', True), ('Even', True), ('regarding', True), ('setInterrupt', True), ('Many', True), ('security', True), ('Hash', True), ('comparison', True), ('technical', True), ('achieved', True), ('elements', True), ('fourth', 
True), ('any', True), ('how', True), ('nevertheless', True), ('leaves', True), ('exactly', True), ('Rom', True), ('Arten', True), ('access', True), ('decided', True), ('Additionally', True), ('alleviated', True), ('products', True), ('org', True), ('constraint', True), ('like', True), ('performing', True), ('simpler', True), ('Indirectly', True), ('upon', True), ('saving', True), ('ordinary', True), ('augmented', True), ('pingReplyBuffer', True), ('define', True), ('65535', True), ('detailed', True), ('emule', True), ('folgend', True), ('umgewandelt', True), ('consuming', True), ('beschreibt', True), ('15', True), ('0001', True), ('die', True), ('assigning', True), ('implementing', True), ('buffer', True), ('having', True), ('1001110110100001', True), ('Now', True), ('actions', True), ('message', True), ('yielding', True), ('execute', True), ('work', True), ('steps', True), ('redistributed', True), ('allows', True), ('March', True), ('finished', True), ('maps', True), ('small', True), ('keys', True), ('Maps', True), ('overflow', True), ('companies', True), ('these', True), ('Widely', True), ('eviction', True), ('his', True), ('particular', True), ('soon', True), ('set', True), ('notably', True), ('provides', True), ('limited', True), ('preceding', True), ('perform', True), ('freed', True), ('2004', True), ('cache', True), ('In', True), ('outperform', True), ('Temporary', True), ('StoreMessages', True), ('Another', True), ('achieve', True), ('locally', True), ('charged', True), ('take', True), ('9has', True), ('27', True), ('12Chapter', True), ('reply', True), ('ValueReplyBuffer', True), ('forgery', True), ('understood', True), ('im', True), ('Based', True), ('oper', True), ('table', True), ('demultiplexer', True), ('continuously', True), ('removes', True), ('Report', True), ('cleared', True), ('member', True), ('28', True), ('transmitted', True), ('wastes', True), ('reza', True), ('real', True), ('serialization', True), ('sustained', True), ('beneficial', True), ('lie', True), ('locate', True), ('principle', True), ('34', True), ('Bericht', True), ('Queue', True), ('mentioned', True), ('critical', True), ('Basic', True), ('Replication', True), ('threads', True), ('pointer', True), ('defined', True), ('clear', True), ('discarded', True), ('redundancy', True), ('publisher', True), ('tail', True), ('notion', True), ('id', True), ('Representation', True), ('over', True), ('waited', True), ('ones', True), ('equal', True), ('der', True), ('nodes', True), ('will', True), ('intention', True), ('conducted', True), ('increase', True), ('Pinging', True), ('Storing', True), ('proportional', True), ('precisely', True), ('declare', True), ('importantly', True), ('imagined', True), ('execution', True), ('tuple', True), ('gets', True), ('introduction', True), ('appropriate', True), ('sind', True), ('pReq', True), ('27Searching', True), ('Subsequently', True), ('possibilities', True), ('Set', True), ('hashing', True), ('exclusively', True), ('instruct', True), ('Messages', True), ('Having', True), ('implementation', True), ('Jlint', True), ('end', True), ('Sun', True), ('largest', True), ('obviously', True), ('Stribling', True), ('Testing', True), ('Why', True), ('responsible', True), ('communicates', True), ('per', True), ('Written', True), ('sections', True), ('With', True), ('overlay', True), ('complex', True), ('called', True), ('Einführung', True), ('leads', True), ('risk', True), ('Serializable', True), ('prevent', True), ('It', True), ('rule', True), ('needed', True), ('automaticstorage', True), 
('revealed', True), ('rescheduled', True), ('name', True), ('Networks', True), ('found', True), ('saved', True), ('HashMap', True), ('maintains', True), ('Like', True), ('accepted', True), ('Reque', True), ('term', True), ('incoming', True), ('searches', True), ('Mazières', True), ('eigentliche', True), ('PlanetLab', True), ('Architektur', True), ('been', True), ('e', True), ('defining', True), ('Such', True), ('less', True), ('convenient', True), ('terms', True), ('receive', True), ('whole', True), ('announcement', True), ('connections', True), ('incompatible', True), ('specifies', True), ('API', True), ('timespan', True), ('discussion', True), ('SA', True), ('detected', True), ('Performance', True), ('Objects', True), ('Global', True), ('ge', True), ('advantage', True), ('corresponds', True), ('searching', True), ('distance', True), ('answered', True), ('exchanges', True), ('Not', True), ('getTimestamp', True), ('Science', True), ('flags', True), ('thorough', True), ('163', True), ('immediately', True), ('listen', True), ('consistent', True), ('Anthony', True), ('unique', True), ('storages', True), ('words', True), ('off', True), ('instances', True), ('subsequent', True), ('handled', True), ('thus', True), ('PublishedValuesContainer', True), ('has', True), ('subsequently', True), ('gotten', True), ('extended', True), ('issues', True), ('7', True), ('looking', True), ('Ping', True), ('obvious', True), ('denial', True), ('range', True), ('treated', True), ('information', True), ('static', True), ('that', True), ('later', True), ('triangle', True), ('reliability', True), ('until', True), ('deciding', True), ('yield', True), ('decreased', True), ('requested', True), ('emerged', True), ('sabotage', True), ('scale', True), ('understand', True), ('Utility', True), ('seemed', True), ('designates', True), ('deal', True), ('disadvantages', True), ('Prof', True), ('1001111001011110', True), ('situation', True), ('they', True), ('applies', True), ('portion', True), ('11', True), ('util', True), ('Number', True), ('32Chapter', True), ('Petar', True), ('server', True), ('among', True), ('Developer', True), ('Appropriate', True), ('warnings', True), ('via', True), ('Service', True), ('although', True), ('enabled', True), ('setExp', True), ('USA', True), ('zwischenzuspeichern', True), ('their', True), ('Assume', True), ('24', True), ('much', True), ('illustrates', True), ('ETH', True), ('ping', True), ('stated', True), ('Upon', True), ('published', True), ('Systems', True), ('Arrays', True), ('misused', True), ('replies', True), ('tool', True), ('unrequested', True), ('maintained', True), ('facilitated', True), ('assigned', True), ('graphically', True), ('als', True), ('accord', True), ('my', True), ('Retrieval', True), ('FindValueMessages', True), ('every', True), ('must', True), ('automatic', True), ('For', True), ('Furthermore', True), ('developing', True), ('Managing', True), ('for', True), ('Pool', True), ('only', True), ('while', True), ('Ku', True), ('send', True), ('memory', True), ('1100', True), ('Referring', True), ('figure', True), ('iterations', True), ('Given', True), ('blocking', True), ('otherwise', True), ('Tables', True), ('Some', True), ('Objekte', True), ('Journal', True), ('timely', True), ('FIND', True), ('replying', True), ('announcing', True), ('up', True), ('very', True), ('distinct', True), ('13', True), ('really', True), ('DHTs', True), ('modified', True), ('yet', True), ('values', True), ('Artho', True), ('fulfill', True), ('was', True), ('challenges', True), ('correspond', 
True), ('16', True), ('entity', True), ('rice', True), ('equivalently', True), ('still', True), ('would', True), ('hash', True), ('Received', True), ('JREs', True), ('sure', True), ('themselves', True), ('KeyValueContainer', True), ('code', True), ('receiver', True), ('level', True), ('danger', True), ('ValueReplyMessages', True), ('Threads', True), ('j2se', True), ('können', True), ('vor', True), ('effect', True), ('null', True), ('4List', True), ('latter', True), ('again', True), ('Structures', True), ('implementations', True), ('hold', True), ('function', True), ('7Celeste', True), ('These', True), ('confusing', True), ('extractable', True), ('Therefore', True), ('direct', True), ('functions', True), ('0100', True), ('multi', True), ('view', True), ('serves', True), ('bitwise', True), ('namely', True), ('Object', True), ('indirectly', True), ('mapping', True), ('useless', True), ('reach', True), ('July', True), ('announced', True), ('developed', True), ('areas', True), ('geschrieben', True), ('Krishna', True), ('idea', True), ('sorting', True), ('s', True), ('systems', True), ('from', True), ('waiting', True), ('feature', True), ('explanation', True), ('32', True), ('9860', True), ('contain', True), ('Arbeiten', True), ('many', True), ('Dr', True), ('Obviously', True), ('constructed', True), ('realize', True), ('after', True), ('tionally', True), ('Problem', True), ('artho', True), ('scenario', True), ('answering', True), ('but', True), ('ensure', True), ('dc', True), ('api', True), ('1001111110100001', True), ('done', True), ('myself', True), ('Echoed', True), ('originating', True), ('reconsider', True), ('cannot', True), ('New', True), ('functionalities', True), ('adjust', True), ('favor', True), ('growing', True), ('instructs', True), ('few', True), ('Washington', True), ('thesis', True), ('Through', True), ('RefresherEntriys', True), ('competing', True), ('hosting', True), ('FindNodeMessage', True), ('nc', True), ('caused', True), ('either', True), ('Map', True), ('query', True), ('iterates', True), ('structured', True), ('addition', True), ('recurring', True), ('instead', True), ('Som', True), ('next', True), ('strange', True), ('better', True), ('Tardis', True), ('assumed', True), ('Y', True), ('mit', True), ('580', True), ('implements', True), ('created', True), ('Requ', True), ('How', True), ('as', True), ('array', True), ('Payload', True), ('chooses', True), ('picks', True), ('und', True), ('Byte', True), ('least', True), ('likely', True), ('plished', True), ('listens', True), ('programmed', True), ('mainly', True), ('hours', True), ('involves', True), ('analyzer', True), ('holds', True), ('6To', True), ('participates', True), ('ever', True), ('periodic', True), ('PingReqRepMessage', True), ('last', True), ('RefresherEntry', True), ('Buffern', True), ('approach', True), ('Operations', True), ('computing', True), ('bottom', True), ('ValueReply', True), ('non', True), ('report', True), ('favourable', True), ('tolerated', True), ('creating', True), ('none', True), ('section', True), ('arrays', True), ('are', True), ('point', True), ('variables', True), ('Second', True), ('expensive', True), ('Save', True), ('Being', True), ('several', True), ('Discovered', True), ('Communications', True), ('C', True), ('Goal', True), ('plementation', True), ('project', True), ('huge', True), ('against', True), ('Caronni', True), ('format', True), ('expiration', True), ('Class', True), ('convenience', True), ('Time', True), ('reduce', True), ('9', True), ('beginning', True), ('contributes', True), 
('able', True), ('bootstrapping', True), ('g', True), ('28Conversely', True), ('keep', True), ('JRE', True), ('worldwide', True), ('track', True), ('Metric', True), ('essentially', True), ('communicated', True), ('environment', True), ('respective', True), ('given', True), ('losers', True), ('were', True), ('supposed', True), ('Finding', True), ('placed', True), ('high', True), ('html', True), ('passed', True), ('effectively', True), ('NodeReplyMessage', True), ('republished', True), ('Each', True), ('IPv4', True), ('Adding', True), ('paragraph', True), ('19', True), ('class', True), ('stops', True), ('operations', True), ('represent', True), ('concept', True), ('recalculated', True), ('Barcelona', True), ('generate', True), ('short', True), ('basically', True), ('looked', True), ('backed', True), ('Rhea', True), ('distributed', True), ('lifetime', True), ('seen', True), ('signify', True), ('propose', True), ('three', True), ('Unrequested', True), ('Calculation', True), ('caller', True), ('also', True), ('Sender', True), ('head', True), ('overall', True), ('just', True), ('joins', True), ('third', True), ('Public', True), ('UW', True), ('carry', True), ('provided', True), ('0000', True), ('Clearly', True), ('extract', True), ('definines', True), ('across', True), ('importance', True), ('since', True), ('say', True), ('waste', True), ('293', True), ('After', True), ('reaching', True), ('usually', True), ('When', True), ('emptied', True), ('lab', True), ('advantages', True), ('quite', True), ('contact', True), ('converted', True), ('suspends', True), ('specific', True), ('it', True), ('extensively', True), ('Analyzer', True), ('delayed', True), ('linked', True), ('expect', True), ('Up', True), ('Fur', True), ('Retrieving', True), ('Ben', True), ('consideration', True), ('insided', True), ('Against', True), ('Term', True), ('visits', True), ('später', True), ('requirement', True), ('external', True), ('malicious', True), ('Satisfying', True), ('imposes', True), ('DatagramPacket', True), ('current', True), ('durchgeführt', True), ('want', True), ('On', True), ('approaches', True), ('than', True), ('without', True), ('sum', True), ('modification', True), ('interface', True), ('160', True), ('in', True), ('FindValue', True), ('IPTPS', True), ('cs', True), ('ensures', True), ('Bytes', True), ('known', True), ('possibility', True), ('ValueReplyMessage', True), ('0010', True), ('Laboratory', True), ('whereas', True), ('checked', True), ('faked', True), ('favored', True), ('employed', True), ('aim', True), ('item', True), ('replicates', True), ('transfered', True), ('violated', True), ('worse', True), ('As', True), ('architectural', True), ('republish', True), ('Thereafter', True), ('Software', True), ('Zusammenfassung', True), ('structrue', True), ('versions', True), ('specification', True), ('interrupted', True), ('go', True), ('Wichtige', True), ('necessity', True), ('thermore', True), ('ArrayList', True), ('produces', True), ('falls', True), ('FindValueMessage', True), ('Different', True), ('avoidable', True), ('Hierarchical', True), ('t', True), ('purpose', True), ('Tasks', True), ('4', True), ('To', True), ('hard', True), ('depicted', True), ('be', True), ('contacted', True), ('begin', True), ('special', True), ('protection', True), ('built', True), ('ausstehend', True), ('controlled', True), ('lookups', True), ('central', True), ('priority', True), ('University', True), ('own', True), ('Repositories', True), ('custom', True), ('waisted', True), ('12', True), ('best', True), ('Similar', True), 
('javadoc', True), ('annual', True), ('aid', True), ('collisions', True), ('DHT', True), ('establish', True), ('pdf', True), ('messages', True), ('scheduled', True), ('eMule', True), ('translating', True), ('pity', True), ('portions', True), ('zero', True), ('speed', True), ('whitepapers', True), ('legitimate', True), ('fastest', True), ('company', True), ('schedule', True), ('Javadoc', True), ('finding', True), ('25Replicating', True), ('specialties', True), ('First', True), ('inside', True), ('n', True), ('trivial', True), ('measures', True), ('consequence', True), ('sequence', True), ('efficiently', True), ('offs', True), ('indexes', True), ('Reply', True), ('with', True), ('P', True), ('buckets', True), ('01', True), ('655535', True), ('something', True), ('second', True), ('arrival', True), ('moment', True), ('storage', True), ('stored', True), ('further', True), ('Locating', True), ('intermediate', True), ('flush', True), ('Either', True), ('consequences', True), ('updateTimestamp', True), ('contains', True), ('detect', True), ('service', True), ('easy', True), ('concludes', True), ('readability', True), ('architecture', True), ('generic', True), ('recently', True), ('meaningful', True), ('details', True), ('symmetric', True), ('duration', True), ('Refresher', True), ('develop', True), ('far', True), ('alternative', True), ('order', True), ('find', True), ('fast', True), ('if', True), ('instance', True), ('docs', True), ('essage', True), ('Separate', True), ('Should', True), ('containing', True), ('worst', True), ('retrieve', True), ('because', True), ('republishing', True), ('correctness', True), ('RPC', True), ('preliminary', True), ('loop', True), ('Figure', True), ('specialty', True), ('unused', True), ('Pairs', True), ('rounds', True), ('log', True), ('value', True), ('ein', True), ('taken', True), ('sender', True), ('mean', True), ('specified', True), ('Figures', True), ('concurrently', True), ('D', True), ('returned', True), ('future', True), ('describe', True), ('Technical', True), ('peer', True), ('evenly', True), ('Germano', True), ('collect', True), ('RefresherThread', True), ('Answering', True), ('avoid', True), ('iteratively', True), ('drawing', True), ('Selected', True), ('pool', True), ('installing', True), ('unidirectional', True), ('lookup', True), ('Random', True), ('y', True), ('Exactly', True), ('whose', True), ('processing', True), ('outside', True), ('before', True), ('discard', True), ('see', True), ('biatowicz', True), ('standard', True), ('within', True), ('let', True), ('copied', True), ('distribute', True), ('edu', True), ('Department', True), ('ensuring', True), ('system', True), ('did', True), ('single', True), ('causes', True), ('calculates', True), ('length', True), ('1001110110111110', True), ('announce', True), ('Motivation', True), ('characteristics', True), ('exponentially', True), ('symmetry', True), ('describing', True), ('fashion', True), ('1000', True), ('noch', True), ('enough', True), ('challenge', True), ('close', True), ('33Chapter', True), ('Environment', True), ('except', True), ('occurs', True), ('chart', True), ('definierten', True), ('Compare', True), ('completely', True), ('checking', True), ('never', True), ('online', True), ('can', True), ('doing', True), ('Cambridge', True), ('21', True), ('entered', True), ('annoying', True), ('UDP', True), ('strings', True), ('route', True), ('converge', True), ('pointers', True), ('bytes', True), ('unlikely', True), ('kad', True), ('Merkmale', True), ('usage', True), ('favorable', True), ('same', 
True), ('which', True), ('normal', True), ('verified', True), ('arrives', True), ('sufficiently', True), ('Areas', True), ('inquired', True), ('tion', True), ('compared', True), ('zu', True), ('hand', True), ('have', True), ('Requests', True), ('formulate', True), ('sent', True), ('Entries', True), ('Tests', True), ('identifier', True), ('PUC', True), ('too', True), ('communicate', True), ('sharing', True), ('What', True), ('Huang', True), ('base', True), ('long', True), ('empirically', True), ('aware', True), ('enforces', True), ('shown', True), ('answer', True), ('descending', True), ('program', True), ('classes', True), ('both', True), ('24Besides', True), ('neighbors', True), ('hit', True), ('fixed', True), ('General', True), ('34Bibliography', True), ('not', True), ('form', True), ('April', True), ('well', True), ('iterating', True), ('transient', True), ('logical', True), ('combinations', True), ('receipt', True), ('kind', True), ('performs', True), ('nr', True), ('defines', True), ('forced', True), ('Bootstrapping', True), ('measure', True), ('functionality', True), ('concurrency', True), ('into', True), ('Nodes', True), ('Network', True), ('sun', True), ('support', True), ('PingRepRepMessage', True), ('Remember', True), ('17be', True), ('beforehand', True), ('emitted', True), ('networked', True), ('Note', True), ('lies', True), ('26', True), ('motivation', True), ('desirable', True), ('follows', True), ('re', True), ('statement', True), ('made', True), ('superior', True), ('Conferences', True), ('corresponding', True), ('einmal', True), ('relative', True), ('queries', True), ('load', True), ('should', True), ('foreseeable', True), ('valid', True), ('IEEE', True), ('repository', True), ('Invoking', True), ('pattern', True), ('calls', True), ('8Chapter', True)]
print(len(s))
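# Illustrative note (an assumption, not part of the original file): dict(s)
# collapses the (word, flag) pairs into a mapping for O(1) membership tests,
# instead of scanning the list:
# words = dict(s)
# "Kademlia" in words  ->  True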
| 13,624.333333
| 40,858
| 0.577203
| 4,322
| 40,873
| 5.458353
| 0.439611
| 0.001145
| 0.001653
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008891
| 0.105718
| 40,873
| 3
| 40,859
| 13,624.333333
| 0.636518
| 0
| 0
| 0
| 0
| 0
| 0.36566
| 0.001835
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.5
| 0.5
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 6
| 51ec64976937746babe9e6d2f3c646187cb027ec
| 67
| py
| Python
| fecho/__init__.py
| thehappydinoa/fecho
| 37d382527cd0f8fe6c1729060ba0e3085c4f6549
| ["MIT"] | 1
| 2020-06-02T17:27:24.000Z
| 2020-06-02T17:27:24.000Z
| fecho/__init__.py
| thehappydinoa/fecho
| 37d382527cd0f8fe6c1729060ba0e3085c4f6549
| ["MIT"] | null | null | null
| fecho/__init__.py
| thehappydinoa/fecho
| 37d382527cd0f8fe6c1729060ba0e3085c4f6549
| ["MIT"] | 1
| 2020-12-11T10:07:37.000Z
| 2020-12-11T10:07:37.000Z
from .cli import *
from .client import *
from .exceptions import *
| 16.75
| 25
| 0.731343
| 9
| 67
| 5.444444
| 0.555556
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179104
| 67
| 3
| 26
| 22.333333
| 0.890909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 51fabe2d0a9cd354d3724d40abf58a208941b49a
| 47
| py
| Python
| datasets/__init__.py
| gmshashank/Deep_Flow_Prediction
| 9b4c388b70a458cddac20258242a6a36965524bc
| ["MIT"] | null | null | null
| datasets/__init__.py
| gmshashank/Deep_Flow_Prediction
| 9b4c388b70a458cddac20258242a6a36965524bc
| ["MIT"] | null | null | null
| datasets/__init__.py
| gmshashank/Deep_Flow_Prediction
| 9b4c388b70a458cddac20258242a6a36965524bc
| ["MIT"] | null | null | null
from .dataset import TurbDataset, ValidDataset
| 23.5
| 46
| 0.851064
| 5
| 47
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 1
| 47
| 47
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 51fb2cb3c499d58a859146de5060d6c42a04aa25
| 361
| py
| Python
| opytimizer/optimizers/misc/__init__.py
| anukaal/opytimizer
| 5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
| ["Apache-2.0"] | 528
| 2018-10-01T20:00:09.000Z
| 2022-03-27T11:15:31.000Z
| opytimizer/optimizers/misc/__init__.py
| anukaal/opytimizer
| 5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
| ["Apache-2.0"] | 17
| 2019-10-30T00:47:03.000Z
| 2022-03-21T11:39:28.000Z
| opytimizer/optimizers/misc/__init__.py
| anukaal/opytimizer
| 5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
| ["Apache-2.0"] | 35
| 2018-10-01T20:03:23.000Z
| 2022-03-20T03:54:15.000Z
"""An evolutionary package for all common opytimizer modules.
It contains implementations of miscellaneous-based optimizers.
"""
from opytimizer.optimizers.misc.aoa import AOA
from opytimizer.optimizers.misc.cem import CEM
from opytimizer.optimizers.misc.doa import DOA
from opytimizer.optimizers.misc.gs import GS
from opytimizer.optimizers.misc.hc import HC
| 36.1
| 62
| 0.833795
| 50
| 361
| 6.02
| 0.46
| 0.232558
| 0.398671
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099723
| 361
| 9
| 63
| 40.111111
| 0.926154
| 0.33518
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| cf9e842d7aa12132c45ca8f6bab4bd9722e3c6dc
| 71
| py
| Python
| fypress/public/__init__.py
| Fy-/FyPress
| be357134969797f92b94db77c9e3aa863c0094fe
| ["MIT"] | 70
| 2016-06-07T10:17:02.000Z
| 2021-06-23T05:36:03.000Z
| fypress/public/__init__.py
| Fy-/FyPress
| be357134969797f92b94db77c9e3aa863c0094fe
| ["MIT"] | 10
| 2016-06-24T08:17:54.000Z
| 2020-07-24T07:34:39.000Z
| fypress/public/__init__.py
| Fy-/FyPress
| be357134969797f92b94db77c9e3aa863c0094fe
| ["MIT"] | 15
| 2016-06-16T20:40:55.000Z
| 2019-08-06T02:45:52.000Z
# -*- coding: utf-8 -*-
from views import public as public_blueprint
| 23.666667
| 45
| 0.690141
| 10
| 71
| 4.8
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 0.183099
| 71
| 2
| 46
| 35.5
| 0.810345
| 0.295775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 6
| 5c50fb3af9e4b8d0c8c0b2256487f47325db8afc
| 202
| py
| Python
| motor/__init__.py
| mwakok/Molecular-Motor-Dynamics
| 697671c55c2c1fad27c2ddb7a1f28450f973e639
| ["Apache-2.0"] | 2
| 2020-04-30T12:28:11.000Z
| 2021-12-20T16:05:30.000Z
| motor/__init__.py
| mwakok/Molecular-Motor-Dynamics
| 697671c55c2c1fad27c2ddb7a1f28450f973e639
| ["Apache-2.0"] | 2
| 2021-06-08T22:37:10.000Z
| 2021-09-08T02:35:23.000Z
| motor/__init__.py
| mwakok/Molecular-Motor-Dynamics
| 697671c55c2c1fad27c2ddb7a1f28450f973e639
| ["Apache-2.0"] | null | null | null
from .calculate_propensity import calculate_propensity
from .update_state import update_state
from .event_type import event_type
from .select_event import select_event
from .simulation import gillespie
| 33.666667
| 54
| 0.876238
| 28
| 202
| 6.035714
| 0.392857
| 0.224852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09901
| 202
| 5
| 55
| 40.4
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 5c70310f87871b4e5fe65fb00907468ae7c8db81
| 102,416
| py
| Python
| configurations/coding_plans.py
| AfricasVoices/Project-WUSC-KEEP-II
| c135511475361e04bb1358126b7e08ceecc8ef88
| ["MIT"] | null | null | null
| configurations/coding_plans.py
| AfricasVoices/Project-WUSC-KEEP-II
| c135511475361e04bb1358126b7e08ceecc8ef88
| ["MIT"] | 22
| 2018-11-25T17:17:08.000Z
| 2021-04-07T10:21:25.000Z
| configurations/coding_plans.py
| AfricasVoices/Project-WUSC-KEEP-II
| c135511475361e04bb1358126b7e08ceecc8ef88
| ["MIT"] | null | null | null
from core_data_modules.cleaners import somali, swahili, Codes
from core_data_modules.traced_data.util.fold_traced_data import FoldStrategies
from configurations import code_imputation_functions
from configurations.code_schemes import CodeSchemes
from src.lib.configuration_objects import CodingConfiguration, CodingModes, CodingPlan

def clean_age_with_range_filter(text):
    """
    Cleans age from the given `text`, setting to NC if the cleaned age is not in the range 10 <= age < 100.
    """
    age = swahili.DemographicCleaner.clean_age(text)
    if type(age) == int and 10 <= age < 100:
        return str(age)
    # TODO: Once the cleaners are updated to not return Codes.NOT_CODED, this should be updated to still return
    # NC in the case where age is an int but is out of range
    else:
        return Codes.NOT_CODED
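# Illustrative behaviour (hypothetical inputs; assumes the cleaner extracts
# the age as an int, as above):
#   clean_age_with_range_filter("I am 25")  ->  "25"
#   clean_age_with_range_filter("age 7")    ->  Codes.NOT_CODED  (out of range)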
S01_DADAAB_RQA_CODING_PLANS = [
CodingPlan(raw_field="rqa_s01e01_raw",
dataset_name="dadaab_s01e01",
time_field="sent_on",
run_id_field="rqa_s01e01_run_id",
coda_filename="dadaab_s01e01.json",
icr_filename="dadaab_s01e01.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S01E01_REASONS,
coded_field="rqa_s01e01_coded",
analysis_file_key="rqa_s01e01_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S01E01_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s01e01"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e02_raw",
dataset_name="dadaab_s01e02",
time_field="sent_on",
run_id_field="rqa_s01e02_run_id",
coda_filename="dadaab_s01e02.json",
icr_filename="dadaab_s01e02.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S01E02_REASONS,
coded_field="rqa_s01e02_coded",
analysis_file_key="rqa_s01e02_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S01E02_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s01e02"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e03_raw",
dataset_name="dadaab_s01e03",
time_field="sent_on",
run_id_field="rqa_s01e03_run_id",
coda_filename="dadaab_s01e03.json",
icr_filename="dadaab_s01e03.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S01E03_REASONS,
coded_field="rqa_s01e03_coded",
analysis_file_key="rqa_s01e03_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S01E03_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s01e03"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e04_raw",
dataset_name="dadaab_s01e04",
time_field="sent_on",
run_id_field="rqa_s01e04_run_id",
coda_filename="dadaab_s01e04.json",
icr_filename="dadaab_s01e04.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S01E04_REASONS,
coded_field="rqa_s01e04_coded",
analysis_file_key="rqa_s01e04_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S01E04_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s01e04"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e05_raw",
dataset_name="dadaab_s01e05",
time_field="sent_on",
run_id_field="rqa_s01e05_run_id",
coda_filename="dadaab_s01e05.json",
icr_filename="dadaab_s01e05.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S01E05_REASONS,
coded_field="rqa_s01e05_coded",
analysis_file_key="rqa_s01e05_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S01E05_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s01e05"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e06_raw",
dataset_name="dadaab_s01e06",
time_field="sent_on",
run_id_field="rqa_s01e06_run_id",
coda_filename="dadaab_s01e06.json",
icr_filename="dadaab_s01e06.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S01E06_REASONS,
coded_field="rqa_s01e06_coded",
analysis_file_key="rqa_s01e06_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S01E06_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s01e06"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e07_raw",
dataset_name="dadaab_s01e07",
time_field="sent_on",
run_id_field="rqa_s01e07_run_id",
coda_filename="dadaab_s01e07.json",
icr_filename="dadaab_s01e07.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S01E07_REASONS,
coded_field="rqa_s01e07_coded",
analysis_file_key="rqa_s01e07_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S01E07_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s01e07"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01_intro_raw",
dataset_name="dadaab_s01_intro",
time_field="sent_on",
run_id_field="rqa_s01_intro_run_id",
coda_filename="dadaab_s01_intro.json",
icr_filename="dadaab_s01_intro.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S01_INTRO_REASONS,
coded_field="rqa_s01_intro_coded",
analysis_file_key="rqa_s01_intro_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S01_INTRO_REASONS,
x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s01 intro"),
raw_field_fold_strategy=FoldStrategies.concatenate),
]
S02_DADAAB_RQA_CODING_PLANS = [
CodingPlan(raw_field="community_views_on_girls_education_raw",
dataset_name="dadaab_community_views_on_girls_education",
time_field="sent_on",
coda_filename="dadaab_community_views_on_girls_education.json",
run_id_field="community_views_on_girls_education_run_id",
icr_filename="dadaab_community_views_on_girls_education.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_COMMUNITY_VIEWS_ON_GIRLS_EDUCATION,
coded_field="community_views_on_girls_education_coded",
analysis_file_key="community_views_on_girls_education_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_COMMUNITY_VIEWS_ON_GIRLS_EDUCATION, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"dadaab community views on girls education"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="community_views_on_girls_education_final_raw",
dataset_name="dadaab_community_views_on_girls_education_final",
time_field="sent_on",
coda_filename="dadaab_community_views_on_girls_education_final.json",
run_id_field="community_views_on_girls_education_final_run_id",
icr_filename="dadaab_community_views_on_girls_education_final.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_COMMUNITY_VIEWS_ON_GIRLS_EDUCATION_FINAL,
coded_field="community_views_on_girls_education_final_coded",
analysis_file_key="community_views_on_girls_education_final_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_COMMUNITY_VIEWS_ON_GIRLS_EDUCATION_FINAL, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"dadaab community views on girls education final"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e01_raw",
dataset_name="dadaab_s02e01",
time_field="sent_on",
run_id_field="rqa_s02e01_run_id",
coda_filename="dadaab_s02e01.json",
icr_filename="dadaab_s02e01.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S02E01_REASONS,
coded_field="rqa_s02e01_coded",
analysis_file_key="rqa_s02e01_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S02E01_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s02e01"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e02_raw",
dataset_name="dadaab_s02e02",
time_field="sent_on",
run_id_field="rqa_s02e02_run_id",
coda_filename="dadaab_s02e02.json",
icr_filename="dadaab_s02e02.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S02E02_REASONS,
coded_field="rqa_s02e02_coded",
analysis_file_key="rqa_s02e02_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S02E02_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s02e02"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e03_raw",
dataset_name="dadaab_s02e03",
time_field="sent_on",
run_id_field="rqa_s02e03_run_id",
coda_filename="dadaab_s02e03.json",
icr_filename="dadaab_s02e03.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S02E03_REASONS,
coded_field="rqa_s02e03_coded",
analysis_file_key="rqa_s02e03_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S02E03_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s02e03"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e04_raw",
dataset_name="dadaab_s02e04",
time_field="sent_on",
run_id_field="rqa_s02e04_run_id",
coda_filename="dadaab_s02e04.json",
icr_filename="dadaab_s02e04.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S02E04_REASONS,
coded_field="rqa_s02e04_coded",
analysis_file_key="rqa_s02e04_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S02E04_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s02e04"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e05_raw",
dataset_name="dadaab_s02e05",
time_field="sent_on",
run_id_field="rqa_s02e05_run_id",
coda_filename="dadaab_s02e05.json",
icr_filename="dadaab_s02e05.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S02E05_REASONS,
coded_field="rqa_s02e05_coded",
analysis_file_key="rqa_s02e05_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S02E05_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s02e05"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e06_raw",
dataset_name="dadaab_s02e06",
time_field="sent_on",
run_id_field="rqa_s02e06_run_id",
coda_filename="dadaab_s02e06.json",
icr_filename="dadaab_s02e06.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S02E06_REASONS,
coded_field="rqa_s02e06_coded",
analysis_file_key="rqa_s02e06_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S02E06_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s02e06"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e07_raw",
dataset_name="dadaab_s02e07",
time_field="sent_on",
run_id_field="rqa_s02e07_run_id",
coda_filename="dadaab_s02e07.json",
icr_filename="dadaab_s02e07.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S02E07_REASONS,
coded_field="rqa_s02e07_coded",
analysis_file_key="rqa_s02e07_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S02E07_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s02e07"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e08_raw",
dataset_name="dadaab_s02e08",
time_field="sent_on",
run_id_field="rqa_s02e08_run_id",
coda_filename="dadaab_s02e08.json",
icr_filename="dadaab_s02e08.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S02E08_REASONS,
coded_field="rqa_s02e08_coded",
analysis_file_key="rqa_s02e08_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S02E08_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s02e08"),
raw_field_fold_strategy=FoldStrategies.concatenate),
]
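# Season 3 RQA coding plans for the Dadaab pipelines.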
S03_DADAAB_RQA_CODING_PLANS = [
CodingPlan(raw_field="rqa_s03e01_raw",
dataset_name="dadaab_s03e01",
time_field="sent_on",
run_id_field="rqa_s03e01_run_id",
coda_filename="dadaab_s03e01.json",
icr_filename="dadaab_s03e01.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S03E01_REASONS,
coded_field="rqa_s03e01_coded",
analysis_file_key="rqa_s03e01_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S03E01_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s03e01"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s03e02_raw",
dataset_name="dadaab_s03e02",
time_field="sent_on",
run_id_field="rqa_s03e02_run_id",
coda_filename="dadaab_s03e02.json",
icr_filename="dadaab_s03e02.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S03E02_REASONS,
coded_field="rqa_s03e02_coded",
analysis_file_key="rqa_s03e02_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S03E02_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s03e02"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s03e03_raw",
dataset_name="dadaab_s03e03",
time_field="sent_on",
run_id_field="rqa_s03e03_run_id",
coda_filename="dadaab_s03e03.json",
icr_filename="dadaab_s03e03.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S03E03_REASONS,
coded_field="rqa_s03e03_coded",
analysis_file_key="rqa_s03e03_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S03E03_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s03e03"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s03e04_raw",
dataset_name="dadaab_s03e04",
time_field="sent_on",
run_id_field="rqa_s03e04_run_id",
coda_filename="dadaab_s03e04.json",
icr_filename="dadaab_s03e04.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S03E04_REASONS,
coded_field="rqa_s03e04_coded",
analysis_file_key="rqa_s03e04_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S03E04_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s03e04"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s03e05_raw",
dataset_name="dadaab_s03e05",
time_field="sent_on",
run_id_field="rqa_s03e05_run_id",
coda_filename="dadaab_s03e05.json",
icr_filename="dadaab_s03e05.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S03E05_REASONS,
coded_field="rqa_s03e05_coded",
analysis_file_key="rqa_s03e05_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S03E05_REASONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab s03e05"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="expectations_from_educated_girls_raw",
dataset_name="dadaab_expectations_from_educated_girls",
listening_group_filename="dadaab_expectations_from_educated_girls.csv",
time_field="sent_on",
run_id_field="expectations_from_educated_girls_run_id",
coda_filename="dadaab_expectations_from_educated_girls.json",
icr_filename="dadaab_expectations_from_educated_girls.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_EXPECTATIONS_FROM_EDUCATED_GIRLS,
coded_field="expectations_from_educated_girls_coded",
analysis_file_key="expectations_from_educated_girls_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_EXPECTATIONS_FROM_EDUCATED_GIRLS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab expectations_from_educated_girls"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="prevention_of_sgbv_cases_raw",
dataset_name="dadaab_prevention_of_sgbv_cases",
listening_group_filename="dadaab_prevention_of_sgbv_cases.csv",
time_field="sent_on",
run_id_field="prevention_of_sgbv_cases_run_id",
coda_filename="dadaab_prevention_of_sgbv_cases.json",
icr_filename="dadaab_prevention_of_sgbv_cases.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_PREVENTION_OF_SGBV_CASES,
coded_field="prevention_of_sgbv_cases_coded",
analysis_file_key="prevention_of_sgbv_cases_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_PREVENTION_OF_SGBV_CASES, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab prevention_of_sgbv_cases"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="s03_impact_made_raw",
dataset_name="s03_dadaab_impact_made",
time_field="sent_on",
run_id_field="s03_impact_made_run_id",
icr_filename="s03_impact_made.csv",
coda_filename="s03_dadaab_impact_made.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.S03_DADAAB_IMPACT_MADE,
coded_field="s03_impact_made",
analysis_file_key="s03_impact_made_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.S03_DADAAB_IMPACT_MADE, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"s03 dadaab impact made"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="s03_lessons_learnt_raw",
dataset_name="s03_dadaab_lessons_learnt",
time_field="sent_on",
icr_filename="s03_lessons_learnt.csv",
run_id_field="s03_lessons_learnt_run_id",
coda_filename="s03_dadaab_lessons_learnt.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.S03_DADAAB_LESSONS_LEARNT,
coded_field="s03_lessons_learnt",
analysis_file_key="s03_lessons_learnt_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.S03_DADAAB_LESSONS_LEARNT, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"s03 dadaab lessons learnt"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="s03_close_out_raw",
dataset_name="s03_close_out",
time_field="sent_on",
run_id_field="s03_close_out_run_id",
icr_filename="s03_close_out.csv",
coda_filename="dadaab_s03_close_out.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_S03_CLOSE_OUTS,
coded_field="s03_close_out",
analysis_file_key="s03_close_out_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_S03_CLOSE_OUTS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"dadaab s03 close out"),
raw_field_fold_strategy=FoldStrategies.concatenate)
]
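# Season 1 RQA coding plans for the Kakuma pipelines.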
S01_KAKUMA_RQA_CODING_PLANS = [
CodingPlan(raw_field="rqa_s01e01_raw",
dataset_name="kakuma_s01e01",
listening_group_filename="kakuma_s01e01_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s01e01_run_id",
coda_filename="kakuma_s01e01.json",
icr_filename="kakuma_s01e01.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S01E01_REASONS,
coded_field="rqa_s01e01_coded",
analysis_file_key="rqa_s01e01_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S01E01_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s01e01"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e02_raw",
dataset_name="kakuma_s01e02",
listening_group_filename="kakuma_s01e02_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s01e02_run_id",
coda_filename="kakuma_s01e02.json",
icr_filename="kakuma_s01e02.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S01E02_REASONS,
coded_field="rqa_s01e02_coded",
analysis_file_key="rqa_s01e02_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S01E02_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s01e02"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e03_raw",
dataset_name="kakuma_s01e03",
listening_group_filename="kakuma_s01e03_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s01e03_run_id",
coda_filename="kakuma_s01e03.json",
icr_filename="kakuma_s01e03.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S01E03_REASONS,
coded_field="rqa_s01e03_coded",
analysis_file_key="rqa_s01e03_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S01E03_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s01e03"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e04_raw",
dataset_name="kakuma_s01e04",
listening_group_filename="kakuma_s01e04_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s01e04_run_id",
coda_filename="kakuma_s01e04.json",
icr_filename="kakuma_s01e04.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S01E04_REASONS,
coded_field="rqa_s01e04_coded",
analysis_file_key="rqa_s01e04_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S01E04_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s01e04"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e05_raw",
dataset_name="kakuma_s01e05",
listening_group_filename="kakuma_s01e05_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s01e05_run_id",
coda_filename="kakuma_s01e05.json",
icr_filename="kakuma_s01e05.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S01E05_REASONS,
coded_field="rqa_s01e05_coded",
analysis_file_key="rqa_s01e05_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S01E05_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s01e05"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e06_raw",
dataset_name="kakuma_s01e06",
listening_group_filename="kakuma_s01e06_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s01e06_run_id",
coda_filename="kakuma_s01e06.json",
icr_filename="kakuma_s01e06.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S01E06_REASONS,
coded_field="rqa_s01e06_coded",
analysis_file_key="rqa_s01e06_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S01E06_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s01e06"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01e07_raw",
dataset_name="kakuma_s01e07",
listening_group_filename="kakuma_s01e07_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s01e07_run_id",
coda_filename="kakuma_s01e07.json",
icr_filename="kakuma_s01e07.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S01E07_REASONS,
coded_field="rqa_s01e07_coded",
analysis_file_key="rqa_s01e07_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S01E07_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s01e07"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s01_intro_raw",
dataset_name="kakuma_s01_intro",
time_field="sent_on",
run_id_field="rqa_s01_intro_run_id",
coda_filename="kakuma_s01_intro.json",
icr_filename="kakuma_s01_intro.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S01_INTRO_REASONS,
coded_field="rqa_s01_intro_coded",
analysis_file_key="rqa_s01_intro_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S01_INTRO_REASONS,
x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s01 intro"),
raw_field_fold_strategy=FoldStrategies.concatenate),
]
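# Season 2 RQA coding plans for the Kakuma pipelines.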
S02_KAKUMA_RQA_CODING_PLANS = [
CodingPlan(raw_field="community_views_on_girls_education_raw",
dataset_name="kakuma_community_views_on_girls_education",
time_field="sent_on",
coda_filename="kakuma_community_views_on_girls_education.json",
run_id_field="community_views_on_girls_education_run_id",
icr_filename="kakuma_community_views_on_girls_education.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_COMMUNITY_VIEWS_ON_GIRLS_EDUCATION,
coded_field="community_views_on_girls_education_coded",
analysis_file_key="community_views_on_girls_education_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_COMMUNITY_VIEWS_ON_GIRLS_EDUCATION, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"kakuma community views on girls education"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="community_views_on_girls_education_final_raw",
dataset_name="kakuma_community_views_on_girls_education_final",
time_field="sent_on",
coda_filename="kakuma_community_views_on_girls_education_final.json",
run_id_field="community_views_on_girls_education_final_run_id",
icr_filename="kakuma_community_views_on_girls_education_final.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_COMMUNITY_VIEWS_ON_GIRLS_EDUCATION_FINAL,
coded_field="community_views_on_girls_education_final_coded",
analysis_file_key="community_views_on_girls_education_final_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_COMMUNITY_VIEWS_ON_GIRLS_EDUCATION_FINAL, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"kakuma community views on girls education final"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e01_raw",
dataset_name="kakuma_s02e01",
listening_group_filename="kakuma_s02e01_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s02e01_run_id",
coda_filename="kakuma_s02e01.json",
icr_filename="kakuma_s02e01.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S02E01_REASONS,
coded_field="rqa_s02e01_coded",
analysis_file_key="rqa_s02e01_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S02E01_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s02e01"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e02_raw",
dataset_name="kakuma_s02e02",
listening_group_filename="kakuma_s02e02_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s02e02_run_id",
coda_filename="kakuma_s02e02.json",
icr_filename="kakuma_s02e02.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S02E02_REASONS,
coded_field="rqa_s02e02_coded",
analysis_file_key="rqa_s02e02_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S02E02_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s02e02"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e03_raw",
dataset_name="kakuma_s02e03",
listening_group_filename="kakuma_s02e03_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s02e03_run_id",
coda_filename="kakuma_s02e03.json",
icr_filename="kakuma_s02e03.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S02E03_REASONS,
coded_field="rqa_s02e03_coded",
analysis_file_key="rqa_s02e03_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S02E03_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s02e03"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e04_raw",
dataset_name="kakuma_s02e04",
listening_group_filename="kakuma_s02e04_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s02e04_run_id",
coda_filename="kakuma_s02e04.json",
icr_filename="kakuma_s02e04.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S02E04_REASONS,
coded_field="rqa_s02e04_coded",
analysis_file_key="rqa_s02e04_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S02E04_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s02e04"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e05_raw",
dataset_name="kakuma_s02e05",
listening_group_filename="kakuma_s02e05_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s02e05_run_id",
coda_filename="kakuma_s02e05.json",
icr_filename="kakuma_s02e05.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S02E05_REASONS,
coded_field="rqa_s02e05_coded",
analysis_file_key="rqa_s02e05_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S02E05_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s02e05"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e06_raw",
dataset_name="kakuma_s02e06",
listening_group_filename="kakuma_s02e06_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s02e06_run_id",
coda_filename="kakuma_s02e06.json",
icr_filename="kakuma_s02e06.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S02E06_REASONS,
coded_field="rqa_s02e06_coded",
analysis_file_key="rqa_s02e06_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S02E06_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s02e06"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e07_raw",
dataset_name="kakuma_s02e07",
listening_group_filename="kakuma_s02e07_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s02e07_run_id",
coda_filename="kakuma_s02e07.json",
icr_filename="kakuma_s02e07.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S02E07_REASONS,
coded_field="rqa_s02e07_coded",
analysis_file_key="rqa_s02e07_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S02E07_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s02e07"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s02e08_raw",
dataset_name="kakuma_s02e08",
listening_group_filename="kakuma_s02e08_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s02e08_run_id",
coda_filename="kakuma_s02e08.json",
icr_filename="kakuma_s02e08.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S02E08_REASONS,
coded_field="rqa_s02e08_coded",
analysis_file_key="rqa_s02e08_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S02E08_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s02e08"),
raw_field_fold_strategy=FoldStrategies.concatenate),
]
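# Season 3 RQA coding plans for the Kakuma pipelines.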
S03_KAKUMA_RQA_CODING_PLANS = [
CodingPlan(raw_field="rqa_s03e01_raw",
dataset_name="kakuma_s03e01",
listening_group_filename="kakuma_s03e01_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s03e01_run_id",
coda_filename="kakuma_s03e01.json",
icr_filename="kakuma_s03e01.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S03E01_REASONS,
coded_field="rqa_s03e01_coded",
analysis_file_key="rqa_s03e01_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S03E01_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s03e01"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s03e02_raw",
dataset_name="kakuma_s03e02",
listening_group_filename="kakuma_s03e02_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s03e02_run_id",
coda_filename="kakuma_s03e02.json",
icr_filename="kakuma_s03e02.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S03E02_REASONS,
coded_field="rqa_s03e02_coded",
analysis_file_key="rqa_s03e02_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S03E02_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s03e02"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s03e03_raw",
dataset_name="kakuma_s03e03",
listening_group_filename="kakuma_s03e03_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s03e03_run_id",
coda_filename="kakuma_s03e03.json",
icr_filename="kakuma_s03e03.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S03E03_REASONS,
coded_field="rqa_s03e03_coded",
analysis_file_key="rqa_s03e03_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S03E03_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s03e03"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s03e04_raw",
dataset_name="kakuma_s03e04",
listening_group_filename="kakuma_s03e04_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s03e04_run_id",
coda_filename="kakuma_s03e04.json",
icr_filename="kakuma_s03e04.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S03E04_REASONS,
coded_field="rqa_s03e04_coded",
analysis_file_key="rqa_s03e04_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S03E04_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s03e04"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="rqa_s03e05_raw",
dataset_name="kakuma_s03e05",
listening_group_filename="kakuma_s03e05_listening_group.csv",
time_field="sent_on",
run_id_field="rqa_s03e05_run_id",
coda_filename="kakuma_s03e05.json",
icr_filename="kakuma_s03e05.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S03E05_REASONS,
coded_field="rqa_s03e05_coded",
analysis_file_key="rqa_s03e05_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S03E05_REASONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma s03e05"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="expectations_from_educated_girls_raw",
dataset_name="kakuma_expectations_from_educated_girls",
listening_group_filename="kakuma_expectations_from_educated_girls.csv",
time_field="sent_on",
run_id_field="expectations_from_educated_girls_run_id",
coda_filename="kakuma_expectations_from_educated_girls.json",
icr_filename="kakuma_expectations_from_educated_girls.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_EXPECTATIONS_FROM_EDUCATED_GIRLS,
coded_field="expectations_from_educated_girls_coded",
analysis_file_key="expectations_from_educated_girls_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_EXPECTATIONS_FROM_EDUCATED_GIRLS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma expectations_from_educated_girls"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="prevention_of_sgbv_cases_raw",
dataset_name="kakuma_prevention_of_sgbv_cases",
listening_group_filename="kakuma_prevention_of_sgbv_cases.csv",
time_field="sent_on",
run_id_field="prevention_of_sgbv_cases_run_id",
coda_filename="kakuma_prevention_of_sgbv_cases.json",
icr_filename="kakuma_prevention_of_sgbv_cases.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_PREVENTION_OF_SGBV_CASES,
coded_field="prevention_of_sgbv_cases_coded",
analysis_file_key="prevention_of_sgbv_cases_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_PREVENTION_OF_SGBV_CASES, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma prevention_of_sgbv_cases"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="s03_impact_made_raw",
dataset_name="s03_kakuma_impact_made",
time_field="sent_on",
run_id_field="s03_impact_made_run_id",
icr_filename="s03_impact_made.csv",
coda_filename="s03_kakuma_impact_made.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.S03_KAKUMA_IMPACT_MADE,
coded_field="s03_impact_made",
analysis_file_key="s03_impact_made_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.S03_KAKUMA_IMPACT_MADE, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"s03 kakuma impact made"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="s03_lessons_learnt_raw",
dataset_name="s03_kakuma_lessons_learnt",
time_field="sent_on",
run_id_field="s03_lessons_learnt_run_id",
icr_filename="s03_lessons_learnt.csv",
coda_filename="s03_kakuma_lessons_learnt.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.S03_KAKUMA_LESSONS_LEARNT,
coded_field="s03_lessons_learnt",
analysis_file_key="s03_lessons_learnt_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.S03_KAKUMA_LESSONS_LEARNT, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"s03 kakuma lessons learnt"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="s03_close_out_raw",
dataset_name="s03_close_out",
time_field="sent_on",
run_id_field="s03_close_out_run_id",
icr_filename="s03_close_out.csv",
coda_filename="kakuma_s03_close_out.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_S03_CLOSE_OUTS,
coded_field="s03_close_out",
analysis_file_key="s03_close_out_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_S03_CLOSE_OUTS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"kakuma s03 close out"),
raw_field_fold_strategy=FoldStrategies.concatenate)
]
def get_rqa_coding_plans(pipeline_name):
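    """
    Returns the list of RQA CodingPlans for the given pipeline.

    Seasonal pipelines return that season's plans only; the all-seasons pipelines return the
    concatenation of every season's plans for the relevant camp.
    """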
if pipeline_name == "dadaab_s01_pipeline":
return S01_DADAAB_RQA_CODING_PLANS
elif pipeline_name == "dadaab_s02_pipeline":
return S02_DADAAB_RQA_CODING_PLANS
elif pipeline_name == "dadaab_s03_pipeline":
return S03_DADAAB_RQA_CODING_PLANS
elif pipeline_name == "dadaab_all_seasons_pipeline":
return S01_DADAAB_RQA_CODING_PLANS + S02_DADAAB_RQA_CODING_PLANS + S03_DADAAB_RQA_CODING_PLANS
elif pipeline_name == "kakuma_s01_pipeline":
return S01_KAKUMA_RQA_CODING_PLANS
elif pipeline_name == "kakuma_s02_pipeline":
return S02_KAKUMA_RQA_CODING_PLANS
elif pipeline_name == "kakuma_s03_pipeline":
return S03_KAKUMA_RQA_CODING_PLANS
else:
        assert pipeline_name == "kakuma_all_seasons_pipeline", \
            f"Unknown pipeline_name '{pipeline_name}'; expected one of the dadaab/kakuma " \
            f"seasonal pipelines or an all-seasons pipeline"
return S01_KAKUMA_RQA_CODING_PLANS + S02_KAKUMA_RQA_CODING_PLANS + S03_KAKUMA_RQA_CODING_PLANS
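
# Illustrative usage of the dispatcher above (the pipeline names are the ones it accepts):
#   plans = get_rqa_coding_plans("dadaab_all_seasons_pipeline")
#   assert len(plans) == len(S01_DADAAB_RQA_CODING_PLANS) + len(S02_DADAAB_RQA_CODING_PLANS) \
#                        + len(S03_DADAAB_RQA_CODING_PLANS)

# Demographic coding plans for the Dadaab pipelines. Unlike the RQA plans, these use
# CodingModes.SINGLE with assert-style fold strategies, so folding two traced data items
# requires their demographic labels to match rather than being concatenated.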
DADAAB_DEMOGS_CODING_PLAN = [
CodingPlan(raw_field="location_raw",
dataset_name="dadaab_location",
time_field="location_time",
coda_filename="dadaab_location.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.DADAAB_LOCATION,
coded_field="location_coded",
analysis_file_key="location",
fold_strategy=FoldStrategies.assert_label_ids_equal,
),
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab location"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="gender_raw",
dataset_name="dadaab_gender",
time_field="gender_time",
coda_filename="dadaab_gender.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.GENDER,
cleaner=somali.DemographicCleaner.clean_gender,
coded_field="gender_coded",
analysis_file_key="gender",
fold_strategy=FoldStrategies.assert_label_ids_equal,
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab gender"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="age_raw",
dataset_name="dadaab_age",
time_field="age_time",
coda_filename="dadaab_age.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.AGE,
cleaner=lambda text: clean_age_with_range_filter(text),
coded_field="age_coded",
analysis_file_key="age",
fold_strategy=FoldStrategies.assert_label_ids_equal,
),
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.AGE_CATEGORY,
coded_field="age_category_coded",
analysis_file_key="age_category",
fold_strategy=FoldStrategies.assert_label_ids_equal,
)
],
code_imputation_function=code_imputation_functions.impute_age_category,
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab age"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="household_language_raw",
dataset_name="dadaab_household_language",
time_field="household_language_time",
coda_filename="dadaab_household_language.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.DADAAB_HOUSEHOLD_LANGUAGE,
coded_field="household_language_coded",
analysis_file_key="household_language",
fold_strategy=FoldStrategies.assert_label_ids_equal,
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab household language"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="nationality_raw",
dataset_name="dadaab_nationality",
time_field="nationality_time",
coda_filename="dadaab_nationality.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.NATIONALITY,
coded_field="nationality_coded",
analysis_file_key="nationality",
fold_strategy=FoldStrategies.assert_label_ids_equal,
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab nationality"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="disabled_raw",
dataset_name="dadaab_disabled",
time_field="disabled_time",
coda_filename="dadaab_disabled.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.DISABLED,
coded_field="disabled_coded",
analysis_file_key="disabled",
fold_strategy=FoldStrategies.assert_label_ids_equal,
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value("dadaab disabled"),
raw_field_fold_strategy=FoldStrategies.assert_equal)
]
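# Demographic coding plans for the Kakuma pipelines.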
KAKUMA_DEMOG_CODING_PLANS = [
CodingPlan(raw_field="location_raw",
dataset_name="kakuma_location",
time_field="location_time",
coda_filename="kakuma_location.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.KAKUMA_LOCATION,
coded_field="location_coded",
analysis_file_key="location",
fold_strategy=FoldStrategies.assert_label_ids_equal,
),
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma location"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="gender_raw",
dataset_name="kakuma_gender",
time_field="gender_time",
coda_filename="kakuma_gender.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.GENDER,
cleaner=somali.DemographicCleaner.clean_gender,
coded_field="gender_coded",
analysis_file_key="gender",
fold_strategy=FoldStrategies.assert_label_ids_equal,
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma gender"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="age_raw",
dataset_name="kakuma_age",
time_field="age_time",
coda_filename="kakuma_age.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.AGE,
cleaner=lambda text: clean_age_with_range_filter(text),
coded_field="age_coded",
analysis_file_key="age",
fold_strategy=FoldStrategies.assert_label_ids_equal,
include_in_theme_distribution=False
),
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.AGE_CATEGORY,
coded_field="age_category_coded",
analysis_file_key="age_category",
fold_strategy=FoldStrategies.assert_label_ids_equal,
)
],
code_imputation_function=code_imputation_functions.impute_age_category,
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma age"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="household_language_raw",
dataset_name="kakuma_household_language",
time_field="household_language_time",
coda_filename="kakuma_household_language.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.KAKUMA_HOUSEHOLD_LANGUAGE,
coded_field="household_language_coded",
analysis_file_key="household_language",
fold_strategy=FoldStrategies.assert_label_ids_equal,
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma household language"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="nationality_raw",
dataset_name="kakuma_nationality",
time_field="nationality_time",
coda_filename="kakuma_nationality.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.NATIONALITY,
coded_field="nationality_coded",
analysis_file_key="nationality",
fold_strategy=FoldStrategies.assert_label_ids_equal,
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma nationality"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="disabled_raw",
dataset_name="kakuma_disabled",
time_field="disabled_time",
coda_filename="kakuma_disabled.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.DISABLED,
coded_field="disabled_coded",
analysis_file_key="disabled",
fold_strategy=FoldStrategies.assert_label_ids_equal,
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value("kakuma disabled"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
]
def get_demog_coding_plans(pipeline_name):
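    """
    Returns the demographic CodingPlans for the given pipeline's camp (all Dadaab pipelines
    share one set of demographic plans, and all Kakuma pipelines share another).
    """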
if pipeline_name in ["dadaab_s01_pipeline", "dadaab_s02_pipeline", "dadaab_s03_pipeline", "dadaab_all_seasons_pipeline"]:
return DADAAB_DEMOGS_CODING_PLAN
else:
        assert pipeline_name in ["kakuma_s01_pipeline", "kakuma_s02_pipeline", "kakuma_s03_pipeline", "kakuma_all_seasons_pipeline"], \
            f"Unknown pipeline_name '{pipeline_name}'; expected one of the dadaab/kakuma " \
            f"seasonal pipelines or an all-seasons pipeline"
return KAKUMA_DEMOG_CODING_PLANS
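
# Season 1 follow-up coding plans for the Dadaab pipelines.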
S01_DADAAB_FOLLOW_UP_CODING_PLANS = [
CodingPlan(raw_field="girls_education_champions_raw",
dataset_name="dadaab_girls_education_champions",
time_field="girls_education_champions_time",
coda_filename="dadaab_girls_education_champions.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_GIRLS_EDUCATION_CHAMPIONS,
coded_field="girls_education_champions_coded",
analysis_file_key="girls_education_champions_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_GIRLS_EDUCATION_CHAMPIONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"dadaab girls education champions"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="encouragement_for_boys_raw",
dataset_name="dadaab_encouragement_for_boys",
time_field="encouragement_for_boys_time",
coda_filename="dadaab_encouragement_for_boys.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_ENCOURAGEMENT_FOR_BOYS_CHAMPIONS,
coded_field="encouragement_for_boys",
analysis_file_key="encouragement_for_boys_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_ENCOURAGEMENT_FOR_BOYS_CHAMPIONS, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"dadaab encouragement for boys"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="unmarried_fathers_community_view_raw",
dataset_name="dadaab_unmarried_fathers_community_view",
time_field="unmarried_fathers_community_view_time",
coda_filename="dadaab_unmarried_fathers_community_view.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_UNMARRIED_FATHERS_COMMUNITY_VIEW,
coded_field="girls_unmarried_fathers_community_view",
analysis_file_key="girls_unmarried_fathers_community_view_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_UNMARRIED_FATHERS_COMMUNITY_VIEW, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"dadaab unmarried fathers community view"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="lessons_learnt_raw",
dataset_name="dadaab_lessons_learnt",
time_field="lessons_learnt_time",
coda_filename="dadaab_lessons_learnt.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_LESSONS_LEARNT,
coded_field="lessons_learnt",
analysis_file_key="lessons_learnt_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_LESSONS_LEARNT,
x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"dadaab lessons learnt"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="show_suggestions_raw",
dataset_name="dadaab_show_suggestions",
time_field="show_suggestions_time",
coda_filename="dadaab_show_suggestions.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_SHOW_SUGGESTIONS,
coded_field="show_suggestions",
analysis_file_key="show_suggestions_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_SHOW_SUGGESTIONS,
x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"dadaab show suggestions"),
raw_field_fold_strategy=FoldStrategies.concatenate)
]
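# Season 2 follow-up coding plans for the Dadaab pipelines.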
S02_DADAAB_FOLLOW_UP_CODING_PLANS = [
CodingPlan(raw_field="responses_to_sexual_violence_raw",
dataset_name="dadaab_responses_to_sexual_violence",
time_field="responses_to_sexual_violence_time",
coda_filename="dadaab_responses_to_sexual_violence.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_RESPONSES_TO_SEXUAL_VIOLENCE,
coded_field="girls_responses_to_sexual_violence",
analysis_file_key="girls_responses_to_sexual_violence_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_RESPONSES_TO_SEXUAL_VIOLENCE, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"dadaab responses to sexual violence"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="adolescent_mothers_challenges_raw",
dataset_name="dadaab_adolescent_mothers_challenges",
time_field="adolescent_mothers_challenges_time",
coda_filename="dadaab_adolescent_mothers_challenges.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.DADAAB_ADOLESCENT_MOTHERS_CHALLENGES,
coded_field="adolescent_mothers_challenges",
analysis_file_key="adolescent_mothers_challenges_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.DADAAB_ADOLESCENT_MOTHERS_CHALLENGES, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"dadaab adolescent mothers challenges"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="s02_impact_made_raw",
dataset_name="s02_dadaab_impact_made",
time_field="s02_impact_made_time",
coda_filename="s02_dadaab_impact_made.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.S02_DADAAB_IMPACT_MADE,
coded_field="s02_impact_made",
analysis_file_key="s02_impact_made_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.S02_DADAAB_IMPACT_MADE, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"s02 dadaab impact made"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="s02_lessons_learnt_raw",
dataset_name="s02_dadaab_lessons_learnt",
time_field="s02_lessons_learnt_time",
coda_filename="s02_dadaab_lessons_learnt.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.S02_DADAAB_LESSONS_LEARNT,
coded_field="s02_lessons_learnt",
analysis_file_key="s02_lessons_learnt_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.S02_DADAAB_LESSONS_LEARNT, x, y)
)
],
ws_code=CodeSchemes.DADAAB_WS_CORRECT_DATASET.get_code_with_match_value(
"s02 dadaab lessons learnt"),
raw_field_fold_strategy=FoldStrategies.concatenate),
]
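# Season 1 follow-up coding plans for the Kakuma pipelines.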
S01_KAKUMA_FOLLOW_UP_CODING_PLANS = [
CodingPlan(raw_field="girls_education_champions_raw",
dataset_name="kakuma_girls_education_champions",
time_field="girls_education_champions_time",
coda_filename="kakuma_girls_education_champions.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_GIRLS_EDUCATION_CHAMPIONS,
coded_field="girls_education_champions_coded",
analysis_file_key="girls_education_champions_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_GIRLS_EDUCATION_CHAMPIONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"kakuma girls education champions"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="encouragement_for_boys_raw",
dataset_name="kakuma_encouragement_for_boys",
time_field="encouragement_for_boys_time",
coda_filename="kakuma_encouragement_for_boys.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_ENCOURAGEMENT_FOR_BOYS_CHAMPIONS,
coded_field="encouragement_for_boys",
analysis_file_key="encouragement_for_boys_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_ENCOURAGEMENT_FOR_BOYS_CHAMPIONS, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"kakuma encouragement for boys"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="unmarried_fathers_community_view_raw",
dataset_name="kakuma_unmarried_fathers_community_view",
time_field="unmarried_fathers_community_view_time",
coda_filename="kakuma_unmarried_fathers_community_view.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_UNMARRIED_FATHERS_COMMUNITY_VIEW,
coded_field="unmarried_fathers_community_view",
analysis_file_key="unmarried_fathers_community_view_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_UNMARRIED_FATHERS_COMMUNITY_VIEW, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"kakuma unmarried fathers community view"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="lessons_learnt_raw",
dataset_name="kakuma_lessons_learnt",
time_field="lessons_learnt_time",
coda_filename="kakuma_lessons_learnt.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_LESSONS_LEARNT,
coded_field="lessons_learnt",
analysis_file_key="lessons_learnt_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_LESSONS_LEARNT,
x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"kakuma lessons learnt"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="show_suggestions_raw",
dataset_name="kakuma_show_suggestions",
time_field="show_suggestions_time",
coda_filename="kakuma_show_suggestions.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_SHOW_SUGGESTIONS,
coded_field="show_suggestions",
analysis_file_key="show_suggestions_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_SHOW_SUGGESTIONS,
x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"kakuma show suggestions"),
raw_field_fold_strategy=FoldStrategies.concatenate)
]
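# Season 3 follow-up coding plans for the Dadaab pipelines. No follow-up datasets are configured
# for this season; the empty list keeps the all-seasons concatenation in
# get_follow_up_coding_plans valid.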
S03_DADAAB_FOLLOW_UP_CODING_PLANS = [
]
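# Season 2 follow-up coding plans for the Kakuma pipelines.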
S02_KAKUMA_FOLLOW_UP_CODING_PLANS = [
CodingPlan(raw_field="responses_to_sexual_violence_raw",
dataset_name="kakuma_responses_to_sexual_violence",
time_field="responses_to_sexual_violence_time",
coda_filename="kakuma_responses_to_sexual_violence.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_RESPONSES_TO_SEXUAL_VIOLENCE,
coded_field="girls_responses_to_sexual_violence",
analysis_file_key="girls_responses_to_sexual_violence_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_RESPONSES_TO_SEXUAL_VIOLENCE, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"kakuma responses to sexual violence"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="adolescent_mothers_challenges_raw",
dataset_name="kakuma_adolescent_mothers_challenges",
time_field="adolescent_mothers_challenges_time",
coda_filename="kakuma_adolescent_mothers_challenges.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.KAKUMA_ADOLESCENT_MOTHERS_CHALLENGES,
coded_field="adolescent_mothers_challenges",
analysis_file_key="adolescent_mothers_challenges_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.KAKUMA_ADOLESCENT_MOTHERS_CHALLENGES, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"kakuma adolescent mothers challenges"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="s02_impact_made_raw",
dataset_name="s02_kakuma_impact_made",
time_field="s02_impact_made_time",
coda_filename="s02_kakuma_impact_made.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.S02_KAKUMA_IMPACT_MADE,
coded_field="s02_impact_made",
analysis_file_key="s02_impact_made_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.S02_KAKUMA_IMPACT_MADE, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"s02 kakuma impact made"),
raw_field_fold_strategy=FoldStrategies.concatenate),
CodingPlan(raw_field="s02_lessons_learnt_raw",
dataset_name="s02_kakuma_lessons_learnt",
time_field="s02_lessons_learnt_time",
coda_filename="s02_kakuma_lessons_learnt.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.S02_KAKUMA_LESSONS_LEARNT,
coded_field="s02_lessons_learnt",
analysis_file_key="s02_lessons_learnt_",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(
CodeSchemes.S02_KAKUMA_LESSONS_LEARNT, x, y)
)
],
ws_code=CodeSchemes.KAKUMA_WS_CORRECT_DATASET.get_code_with_match_value(
"s02 kakuma lessons learnt"),
raw_field_fold_strategy=FoldStrategies.concatenate),
]
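# Season 3 follow-up coding plans for the Kakuma pipelines. As with Dadaab, no season 3
# follow-up datasets are configured.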
S03_KAKUMA_FOLLOW_UP_CODING_PLANS = [
]
def get_follow_up_coding_plans(pipeline_name):
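    """
    Returns the follow-up survey CodingPlans for the given pipeline, using the same
    seasonal/all-seasons dispatch as get_rqa_coding_plans.
    """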
if pipeline_name == "dadaab_s01_pipeline":
return S01_DADAAB_FOLLOW_UP_CODING_PLANS
elif pipeline_name == "dadaab_s02_pipeline":
return S02_DADAAB_FOLLOW_UP_CODING_PLANS
elif pipeline_name == "dadaab_s03_pipeline":
return S03_DADAAB_FOLLOW_UP_CODING_PLANS
elif pipeline_name == "dadaab_all_seasons_pipeline":
return S01_DADAAB_FOLLOW_UP_CODING_PLANS + S02_DADAAB_FOLLOW_UP_CODING_PLANS + S03_DADAAB_FOLLOW_UP_CODING_PLANS
elif pipeline_name == "kakuma_s01_pipeline":
return S01_KAKUMA_FOLLOW_UP_CODING_PLANS
elif pipeline_name == "kakuma_s02_pipeline":
return S02_KAKUMA_FOLLOW_UP_CODING_PLANS
elif pipeline_name == "kakuma_s03_pipeline":
return S03_KAKUMA_FOLLOW_UP_CODING_PLANS
else:
        assert pipeline_name == "kakuma_all_seasons_pipeline", \
            f"Unknown pipeline_name '{pipeline_name}'; expected one of the dadaab/kakuma " \
            f"seasonal pipelines or an all-seasons pipeline"
return S01_KAKUMA_FOLLOW_UP_CODING_PLANS + S02_KAKUMA_FOLLOW_UP_CODING_PLANS + S03_KAKUMA_FOLLOW_UP_CODING_PLANS
def get_ws_correct_dataset_scheme(pipeline_name):
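    """
    Returns the WS ("wrong scheme") correct-dataset code scheme for the given pipeline's camp.
    """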
if pipeline_name in ["dadaab_s01_pipeline", "dadaab_s02_pipeline", "dadaab_s03_pipeline", "dadaab_all_seasons_pipeline"]:
return CodeSchemes.DADAAB_WS_CORRECT_DATASET
else:
        assert pipeline_name in ["kakuma_s01_pipeline", "kakuma_s02_pipeline", "kakuma_s03_pipeline", "kakuma_all_seasons_pipeline"], \
            f"Unknown pipeline_name '{pipeline_name}'; expected one of the dadaab/kakuma " \
            f"seasonal pipelines or an all-seasons pipeline"
return CodeSchemes.KAKUMA_WS_CORRECT_DATASET
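
# Minimal sketch of how a pipeline might assemble its configuration from the getters above
# (variable names here are illustrative only, not part of this module):
#   pipeline_name = "kakuma_s02_pipeline"
#   coding_plans = get_rqa_coding_plans(pipeline_name) \
#                  + get_demog_coding_plans(pipeline_name) \
#                  + get_follow_up_coding_plans(pipeline_name)
#   ws_correct_dataset_scheme = get_ws_correct_dataset_scheme(pipeline_name)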
# --- pybamm/models/submodels/electrolyte/stefan_maxwell/__init__.py (jedgedrudd/PyBaMM, BSD-3-Clause) ---
from . import conductivity
from . import diffusion
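# Usage sketch (illustration only; import path assumed from the file location):
# the re-exports above make the submodules reachable through the package, e.g.
#   from pybamm.models.submodels.electrolyte import stefan_maxwell
#   stefan_maxwell.conductivity  # available without importing it separately
#   stefan_maxwell.diffusion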
| 17
| 26
| 0.803922
| 6
| 51
| 6.833333
| 0.666667
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 51
| 2
| 27
| 25.5
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7a5a566a4947668bb3370407849bafcb3ba7e16a
| 167
|
py
|
Python
|
juniper_official/System/system-sensors.py
|
brahmastra2016/healthbot-rules
|
1d24acd298266c39d6adb139ff47d14f8b2d452a
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 43
|
2018-11-27T00:42:45.000Z
|
2022-02-24T01:19:39.000Z
|
juniper_official/System/system-sensors.py
|
brahmastra2016/healthbot-rules
|
1d24acd298266c39d6adb139ff47d14f8b2d452a
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 266
|
2018-10-26T10:19:04.000Z
|
2022-03-16T04:38:29.000Z
|
juniper_official/System/system-sensors.py
|
brahmastra2016/healthbot-rules
|
1d24acd298266c39d6adb139ff47d14f8b2d452a
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 99
|
2018-10-25T09:53:55.000Z
|
2021-12-07T09:51:59.000Z
|
from __future__ import division
'''
Returns the difference between 100 and the given threshold; any extra
keyword arguments are accepted and ignored.
'''
def subtract(threshold, **kwargs):
    return 100 - int(threshold)
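# Worked example (sketch): contrary to the original docstring's wording, the
# function returns 100 minus the integer-coerced threshold and ignores any
# extra keyword arguments.
assert subtract(30) == 70
assert subtract("25", unused="ignored") == 75  # strings are coerced via int()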
| 23.857143
| 57
| 0.742515
| 21
| 167
| 5.714286
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021583
| 0.167665
| 167
| 6
| 58
| 27.833333
| 0.841727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
7a8e0f719d60e4a6b10f301d7ab76f62b1ee78b1
| 239
|
py
|
Python
|
Molecools.py
|
McCoyGroup/Coordinerds
|
058a4f5b29f157e499cec3c8f2da8b216f0210ef
|
[
"MIT"
] | null | null | null |
Molecools.py
|
McCoyGroup/Coordinerds
|
058a4f5b29f157e499cec3c8f2da8b216f0210ef
|
[
"MIT"
] | null | null | null |
Molecools.py
|
McCoyGroup/Coordinerds
|
058a4f5b29f157e499cec3c8f2da8b216f0210ef
|
[
"MIT"
] | null | null | null |
"""
A file that only exists to make this directory usable as a way to import Psience too.
Mostly useful during development
"""
from .Psience.Molecools import *
from .Psience.Molecools import __all__
from .Psience.Molecools import __doc__
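# Usage sketch (illustrative assumption): with the re-exports above, importing
# this module is equivalent to importing Psience.Molecools itself, e.g.
#   from Coordinerds import Molecools   # names resolve to Psience.Molecools members
#   Molecools.__doc__                   # the forwarded docstring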
| 29.875
| 86
| 0.794979
| 35
| 239
| 5.2
| 0.657143
| 0.181319
| 0.32967
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146444
| 239
| 8
| 87
| 29.875
| 0.892157
| 0.497908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7aaaeacead6e0ebf58d39d1c1f93225c2a582f69
| 20,410
|
py
|
Python
|
_test_deliver_data.py
|
openSAIL/MASTDataDelivery
|
60aa8468e55d0d76e1245c3a956a8429e42f4107
|
[
"MIT"
] | 1
|
2018-06-15T17:02:27.000Z
|
2018-06-15T17:02:27.000Z
|
_test_deliver_data.py
|
openSAIL/MASTDataDelivery
|
60aa8468e55d0d76e1245c3a956a8429e42f4107
|
[
"MIT"
] | 44
|
2015-06-05T14:08:39.000Z
|
2018-03-27T21:16:30.000Z
|
_test_deliver_data.py
|
spacetelescope/MASTDataDelivery
|
60aa8468e55d0d76e1245c3a956a8429e42f4107
|
[
"MIT"
] | 1
|
2020-10-03T01:44:52.000Z
|
2020-10-03T01:44:52.000Z
|
"""
.. module:: _test_deliver_data
:synopsis: Test module for deliver_data.py
.. moduleauthor:: Scott W. Fleming <fleming@stsci.edu>
"""
import gzip
import os
import unittest
import deliver_data
#--------------------
class TestGetDataKepler(unittest.TestCase):
""" Main test class. """
reference_file_path = "unit_test_reffiles/deliver_data/"
# Test Cases 1 - 3 = Kepler
def test_case01(self):
""" This uses Kepler 16 to test Kepler long cadence. """
new_str = deliver_data.deliver_data(
["kepler"], ["kplr012644769_lc_Q111111111111111111"],
filters=["kepler"])
old_file = self.reference_file_path + "test_case_01.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case02(self):
""" This uses KIC 757450 to test Kepler short cadence. """
new_str = deliver_data.deliver_data(
["kepler"], ["kplr000757450_sc_Q000000000033333300"],
filters=["kepler"])
old_file = self.reference_file_path + "test_case_02.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
# You can't plot more than one lightcurve at a time, so turn this off for
# now (it's currently broken with caches anyway).
@unittest.skip("Skipping test of two Kepler lightcurves - unsupported.")
def test_case03(self):
""" This uses both Kepler 16 and KIC 757450 to test more than one obsID
in a single request. It also includes a mix of cadence types. """
new_str = deliver_data.deliver_data(
["kepler", "kepler"],
["kplr012644769_lc_Q111111111111111111",
"kplr000757450_sc_Q000000000033333300"],
filters=["kepler", "kepler"])
old_file = self.reference_file_path + "test_case_03.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
# Test Cases 4 - 5 = HLSP_K2VARCAT
def test_case04(self):
""" This uses EPIC 202070161 from Campaign 0. """
new_str = deliver_data.deliver_data(
["hlsp_k2varcat"], ["k2varcat202070161-c00_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_04.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case05(self):
""" This uses EPIC 201515470 from Campaign 1. """
new_str = deliver_data.deliver_data(
["hlsp_k2varcat"], ["k2varcat201515470-c01_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_05.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
# Test Cases 6 - 8 = HLSP_K2SFF
def test_case06(self):
""" This uses EPIC 060019819 from the Engineering Campaign (cet). """
new_str = deliver_data.deliver_data(
["hlsp_k2sff"], ["k2sff060019819-cet_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_06.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case07(self):
""" This uses EPIC 202071387 from Campaign 0. """
new_str = deliver_data.deliver_data(
["hlsp_k2sff"], ["k2sff202071387-c00_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_07.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case08(self):
""" This uses EPIC 204417450 from Campaign 2. """
new_str = deliver_data.deliver_data(
["hlsp_k2sff"], ["k2sff204417450-c02_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_08.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
# Test Cases 9 - 17 = IUE
def test_case09(self):
""" Test of IUE LWP High Dispersion. """
new_str = deliver_data.deliver_data(
["iue"], ["lwp00501"], filters=["HIGH_DISP"])
old_file = self.reference_file_path + "test_case_09.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case10(self):
""" Test of IUE LWP Low Dispersion. """
new_str = deliver_data.deliver_data(
["iue"], ["lwp02572"], filters=["LOW_DISP"])
old_file = self.reference_file_path + "test_case_10.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case11(self):
""" Test of IUE LWR Low Dispersion (also is a double aperture). """
new_str = deliver_data.deliver_data(
["iue"], ["lwr01244"], filters=["LOW_DISP"])
old_file = self.reference_file_path + "test_case_11.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case12(self):
""" Test of IUE LWR High Dispersion. """
new_str = deliver_data.deliver_data(
["iue"], ["lwr01245"], filters=["HIGH_DISP"])
old_file = self.reference_file_path + "test_case_12.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case13(self):
""" Test of IUE SWP Low Dispersion. """
new_str = deliver_data.deliver_data(
["iue"], ["swp01687"], filters=["LOW_DISP"])
old_file = self.reference_file_path + "test_case_13.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case14(self):
""" Test of IUE SWP High Dispersion. """
new_str = deliver_data.deliver_data(
["iue"], ["swp01688"], filters=["HIGH_DISP"])
old_file = self.reference_file_path + "test_case_14.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case15(self):
""" Test of IUE double dispersion. """
new_str = deliver_data.deliver_data(
["iue"], ["lwp04212"], filters=["HIGH_DISP"])
old_file = self.reference_file_path + "test_case_15.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case16(self):
""" Test of IUE double aperture. """
new_str = deliver_data.deliver_data(
["iue"], ["lwp15463"], filters=["LOw_DISP"])
old_file = self.reference_file_path + "test_case_16.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case17(self):
""" Test of IUE double dispersion and double aperture. """
new_str = deliver_data.deliver_data(
["iue"], ["swp32470"], filters=["HIGH_DISP"])
old_file = self.reference_file_path + "test_case_17.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
# Test Case 18 = K2
def test_case18(self):
""" Test of K2 extracted lightcurves (from mission). """
new_str = deliver_data.deliver_data(
["k2"], ["ktwo205896873-c03_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_18.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
# Test Cases 19-21 = GALEX
def test_case19(self):
""" Test of GALEX 2D spectral image (should return error JSON). """
new_str = deliver_data.deliver_data(
["galex"], ["2518748180271595520"], filters=['NUV'],
urls=[("galex.stsci.edu/data/GR6/pipe/01-vsn/06051-CDFS_00/g/01-"
"main/0001-img/07-try/qa/CDFS_00-xg-int_2color.jpg")])
old_file = self.reference_file_path + "test_case_19.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case20(self):
""" Test of GALEX 1D FUV extracted spectrum. """
new_str = deliver_data.deliver_data(
["galex"], ["2518748180274763038"], filters=['FUV'],
urls=[("galex.stsci.edu/data/GR6/pipe/01-vsn/06051-CDFS_00/g/"
"01-main/0001-img/07-try/qa/spjpeg/"
"CDFS_00_id021790-xg-gsp_spc.jpeg")])
old_file = self.reference_file_path + "test_case_20.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case21(self):
""" Test of GALEX 1D NUV extracted spectrum. """
new_str = deliver_data.deliver_data(
["galex"], ["2505272565762628292"], filters=['NUV'],
urls=[("galex.stsci.edu/data/GR7/pipe/01-vsn/05668-PTF10cwr/g/"
"01-main/0001-img/07-try/qa/spjpeg/"
"PTF10cwr_id006852-xg-gsp_spc.jpeg")])
old_file = self.reference_file_path + "test_case_21.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case22(self):
""" Test of HLA/HSC extracted grism spectrum. """
new_str = deliver_data.deliver_data(
["hsc_grism"], ["HAG_J033148.83-274850.4_UDFNICP2_V01.SPEC1D.FITS"])
old_file = self.reference_file_path + "test_case_22.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case23(self):
""" Test of HLA/HSC 2D grism spectrum - handle it's unsupported. """
new_str = deliver_data.deliver_data(
["hsc_grism"], ["HAG_J033148.83-274850.4_UDFNICP2_V01.SPEC2D.FITS"])
old_file = self.reference_file_path + "test_case_23.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case24(self):
""" This uses EPIC 200004923 from Campaign 3. """
new_str = deliver_data.deliver_data(
["hlsp_k2sc"], ["k2sc200004923-c03_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_24.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case25(self):
""" This uses EPIC 210636932 from Campaign 4. """
new_str = deliver_data.deliver_data(
["hlsp_everest"], ["everest210636932-c04_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_25.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case26(self):
""" This tests HSLA support at the coadd level. """
new_str = deliver_data.deliver_data(
["hsla"], ["hsla_coadd"], targets=["NGC-5548"])
old_file = self.reference_file_path + "test_case_26.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case26_2(self):
""" This tests HSLA support at the coadd level with NUV. """
new_str = deliver_data.deliver_data(
["hsla"], ["hsla_coadd"], targets=["HD-6655"])
old_file = self.reference_file_path + "test_case_26_2.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case27(self):
""" This tests HSLA support at the exposure level. """
new_str = deliver_data.deliver_data(
["hsla"], ["lbgu22z3q"], targets=["NGC-5548"])
old_file = self.reference_file_path + "test_case_27.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case28(self):
""" This uses EPIC 201172129 from Campaign 1. """
new_str = deliver_data.deliver_data(
["hlsp_polar"], ["polar201172129-c01_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_28.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case29(self):
""" This uses EPIC 201121245 from Campaign 1. """
new_str = deliver_data.deliver_data(
["hlsp_k2gap"], ["k2gap201121245-c01_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_29.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case30(self):
""" This uses EPIC 220163813 from Campaign 8. """
new_str = deliver_data.deliver_data(
["hlsp_kegs"], ["kegs220163813-c08_lc"], filters=["k2"])
old_file = self.reference_file_path + "test_case_30.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case31(self):
""" This tests a STATES file that should work. """
new_str = deliver_data.deliver_data(
["states"], ["XO-1b_transmission_Deming2013"])
old_file = self.reference_file_path + "test_case_31.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case32(self):
""" This tests a STATES file that is empty and should not work. """
new_str = deliver_data.deliver_data(
["states"], ["TRAPPIST-1b_transmission_deWit2016"])
old_file = self.reference_file_path + "test_case_32.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
def test_case33(self):
""" This tests a K2 short cadence file. """
new_str = deliver_data.deliver_data(
["k2"], ["ktwo203385347-c15_sc"])
old_file = self.reference_file_path + "test_case_33.txt.gz"
if os.path.isfile(old_file):
with gzip.open(old_file, 'rt') as oldfile:
old_str = oldfile.readlines()[0].strip()
else:
self.fail(msg="Reference file not found. Looking for " + old_file)
self.assertEqual(old_str, new_str)
#--------------------
if __name__ == "__main__":
unittest.main()
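# Refactoring sketch (not part of the original module): every test above repeats
# the same compare-against-gzipped-reference pattern. Under that assumption it
# could be factored into a single helper method on the test class, e.g.:
#
# def _assert_matches_reference(self, new_str, case_name):
#     """Compare new_str against the first line of the gzipped reference file."""
#     old_file = self.reference_file_path + case_name + ".txt.gz"
#     if not os.path.isfile(old_file):
#         self.fail(msg="Reference file not found. Looking for " + old_file)
#     with gzip.open(old_file, 'rt') as oldfile:
#         old_str = oldfile.readlines()[0].strip()
#     self.assertEqual(old_str, new_str)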
| 44.273319
| 80
| 0.60098
| 2,685
| 20,410
| 4.347114
| 0.119926
| 0.081563
| 0.064085
| 0.04952
| 0.81794
| 0.796008
| 0.791895
| 0.761309
| 0.720613
| 0.657985
| 0
| 0.054686
| 0.270701
| 20,410
| 460
| 81
| 44.369565
| 0.729459
| 0.102205
| 0
| 0.668508
| 0
| 0.008287
| 0.197413
| 0.048483
| 0
| 0
| 0
| 0
| 0.093923
| 1
| 0.093923
| false
| 0
| 0.01105
| 0
| 0.110497
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7aab78140512f4ed5302e4fe9157bb395b74489d
| 34
|
py
|
Python
|
bridger/tags/__init__.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 2
|
2020-03-17T00:53:23.000Z
|
2020-07-16T07:00:33.000Z
|
bridger/tags/__init__.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 76
|
2019-12-05T01:15:57.000Z
|
2021-09-07T16:47:27.000Z
|
bridger/tags/__init__.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 1
|
2020-02-05T15:09:47.000Z
|
2020-02-05T15:09:47.000Z
|
from .models import TagModelMixin
| 17
| 33
| 0.852941
| 4
| 34
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7aad429d6f1014ef943b3b735db1c56885e8c5f7
| 19,403
|
py
|
Python
|
tests/test_throttling.py
|
sapo/securitylib-python
|
afa176c52fc9effa664be895e86ab9cd07e5018f
|
[
"MIT"
] | 1
|
2015-01-30T16:22:24.000Z
|
2015-01-30T16:22:24.000Z
|
tests/test_throttling.py
|
sapo/securitylib-python
|
afa176c52fc9effa664be895e86ab9cd07e5018f
|
[
"MIT"
] | null | null | null |
tests/test_throttling.py
|
sapo/securitylib-python
|
afa176c52fc9effa664be895e86ab9cd07e5018f
|
[
"MIT"
] | 2
|
2015-06-11T14:30:15.000Z
|
2021-12-01T14:28:54.000Z
|
from securitylib import throttling
from securitylib.random import get_random_token
from securitylib.crypto import generate_authenticator_key
from test_utils import setup_fake_datetime, teardown_fake_datetime, fake_sleep
from nose.tools import eq_, ok_
import mockcache
import unittest
import json
### TESTS ###
class TestThrottling(unittest.TestCase):
def setUp(self):
storage_client = mockcache.Client(["127.0.0.1:11211"])
config = {'authenticator_key': generate_authenticator_key()}
self.counters_storage = throttling.CountersStorage(storage_client, config)
self.session_storage = throttling.SessionStorage(storage_client)
# client
self.state_checker = throttling.StateChecker(self.counters_storage, self.session_storage)
# server
self.state_updater = throttling.StateUpdater(self.counters_storage, self.session_storage)
setup_fake_datetime()
def tearDown(self):
teardown_fake_datetime()
### HELPER METHODS ###
def _test_response(self, response, ip, user=None, pwd=None, ctx=None, session_id=None,
expected_state='ok', expected_throttling_session=False):
eq_(response['state'], expected_state)
if expected_state == 'block':
ok_('unblock_timestamp' in response)
throttling_session = self.session_storage.get(session_id)
eq_(throttling_session is not None, expected_throttling_session)
def _test_login_attempt(self, ip=None, user=None, pwd=None, ctx=None, expected_state_before='ok', expected_state_after='ok', success=False, use_session=True):
# test login attempt with ip, user and pwd
# some of these parameters might be fixed and the rest are generated randomly
if ip is None:
ip = get_random_token()
if user is None:
user = get_random_token()
if pwd is None:
pwd = get_random_token()
session_id = 'ef0c812b00128a8255613efdb1cde34052d450d1' if use_session else None
response = self.state_checker.check_state(ip, user, pwd, session_id, ctx=ctx)
self._test_response(response, ip, user, pwd, expected_state=expected_state_before, ctx=ctx)
self.state_updater.add_request(ip, user, pwd, session_id, ctx=ctx, success=success)
response = self.state_checker.check_state(ip, user, pwd, session_id, ctx=ctx)
self._test_response(response, ip, user, pwd, expected_state=expected_state_after, ctx=ctx)
def _test_request_attempt(self, ip, expected_state_before='ok', expected_state_after='ok', success=False, use_session=True):
# test simple request attempt with only ip
response = self.state_checker.check_state(ip)
self._test_response(response, ip, expected_state=expected_state_before)
self.state_updater.add_request(ip, success=success)
response = self.state_checker.check_state(ip)
self._test_response(response, ip, expected_state=expected_state_after)
def _test_throttling_by_generic(self, key, fixed_ip=False, fixed_user=False, fixed_pwd=False):
ip = get_random_token() if fixed_ip else None
user = get_random_token() if fixed_user else None
pwd = get_random_token() if fixed_pwd else None
captcha_limit = self.state_updater.DEFAULT_CONFIG['limits']['captcha'][key]
block_limit = self.state_updater.DEFAULT_CONFIG['limits']['block'][key]
for i in xrange(captcha_limit - 1):
self._test_login_attempt(ip=ip, user=user, pwd=pwd)
self._test_login_attempt(ip=ip, user=user, pwd=pwd, expected_state_after='captcha')
if not block_limit:
for i in xrange(200):
self._test_login_attempt(ip=ip, user=user, pwd=pwd, expected_state_before='captcha', expected_state_after='captcha')
else:
for i in xrange(block_limit - captcha_limit - 1):
self._test_login_attempt(ip=ip, user=user, pwd=pwd, expected_state_before='captcha', expected_state_after='captcha')
self._test_login_attempt(ip=ip, user=user, pwd=pwd, expected_state_before='captcha', expected_state_after='block')
for i in xrange(10):
self._test_login_attempt(ip=ip, user=user, pwd=pwd, expected_state_before='block', expected_state_after='block')
def _test_expiration_time_counter_generic(self, key, fixed_ip=False, fixed_user=False, fixed_pwd=False):
ip = get_random_token() if fixed_ip else None
user = get_random_token() if fixed_user else None
pwd = get_random_token() if fixed_pwd else None
captcha_limit = self.state_updater.DEFAULT_CONFIG['limits']['captcha'][key]
expiration_time = self.counters_storage.DEFAULT_CONFIG['expiration_times'][key]
for i in xrange(captcha_limit - 1):
self._test_login_attempt(ip=ip, user=user, pwd=pwd)
self._test_login_attempt(ip=ip, user=user, pwd=pwd, expected_state_after='captcha')
response = self.state_checker.check_state(ip=ip, user=user, pwd=pwd)
self._test_response(response, ip=ip, user=user, pwd=pwd, expected_state='captcha')
fake_sleep(expiration_time)
response = self.state_checker.check_state(ip=ip, user=user, pwd=pwd)
self._test_response(response, ip=ip, user=user, pwd=pwd, expected_state='captcha')
fake_sleep(0.01)
response = self.state_checker.check_state(ip=ip, user=user, pwd=pwd)
self._test_response(response, ip=ip, user=user, pwd=pwd)
### ACTUAL TESTS ###
def test_throttling_by_ip_heavily_commented(self):
ip = get_random_token()
session_id = None
# assume user requests the login page
response = self.state_checker.check_state(ip)
self._test_response(response, ip)
# show login page
# set session_id as soon as possible
session_id = 'ef0c812b00128a8255613efdb1cde34052d450d1'
for i in xrange(19):
user = get_random_token()
pwd = get_random_token()
# assume user attempts to login
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd)
# assume invalid credentials
self.state_updater.add_request(ip, user, pwd, session_id, success=False)
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd)
# show login page
user = get_random_token()
pwd = get_random_token()
# assume user attempts to login
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd)
self.state_updater.add_request(ip, user, pwd, session_id, success=False)
# assume invalid credentials
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd, expected_state='captcha')
# show login page with captcha
for i in xrange(79):
user = get_random_token()
pwd = get_random_token()
# assume user attempts to login
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd, expected_state='captcha')
# assume correct captcha
self.state_updater.add_request(ip, user, pwd, session_id, success=False)
# assume invalid credentials
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd, expected_state='captcha')
# show login page with captcha
user = get_random_token()
pwd = get_random_token()
# assume user attempts to login
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd, expected_state='captcha')
# assume correct captcha
self.state_updater.add_request(ip, user, pwd, session_id, success=False)
# assume invalid credentials
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd, expected_state='block')
# show login page with blocked message
user = get_random_token()
pwd = get_random_token()
# assume user attempts to login
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd, expected_state='block')
# assume correct captcha
# cannot proceed, show login page with blocked message
def test_throttling_by_ip(self):
self._test_throttling_by_generic('ip', fixed_ip=True)
def test_throttling_by_user(self):
self._test_throttling_by_generic('user', fixed_user=True)
def test_throttling_by_pwd(self):
self._test_throttling_by_generic('pwd', fixed_pwd=True)
def test_throttling_by_ip_user(self):
self._test_throttling_by_generic('ip_user', fixed_ip=True, fixed_user=True)
def test_throttling_by_ip_pwd(self):
self._test_throttling_by_generic('ip_pwd', fixed_ip=True, fixed_pwd=True)
def test_counters_cleaning(self):
ip = get_random_token()
user = get_random_token()
captcha_limit = self.state_updater.DEFAULT_CONFIG['limits']['captcha']['ip_user']
for i in xrange(captcha_limit - 1):
self._test_login_attempt(ip=ip, user=user, use_session=False)
self._test_login_attempt(ip=ip, user=user, use_session=False, expected_state_after='captcha')
counters = self.counters_storage.get(ip, user)
eq_(counters['ip_user'].value, 3)
eq_(counters['ip'].value, 3)
eq_(counters['user'].value, 1)
self._test_login_attempt(ip=ip, user=user, success=True, use_session=False, expected_state_before='captcha')
counters = self.counters_storage.get(ip, user)
eq_(counters['ip_user'].value, 0)
eq_(counters['ip'].value, 3)
eq_(counters['user'].value, 1)
self._test_login_attempt(ip=ip, user=user, use_session=False)
counters = self.counters_storage.get(ip, user)
eq_(counters['ip_user'].value, 1)
eq_(counters['ip'].value, 4)
# Weird behaviour: the user is counted twice because the ip_user counter was cleared.
# Still, this wouldn't happen if the session were being used, since the user
# would have a free pass and the counters wouldn't be updated.
eq_(counters['user'].value, 2)
def test_free_pass(self):
user = get_random_token()
captcha_limit = self.state_updater.DEFAULT_CONFIG['limits']['captcha']['user']
free_pass_limit = self.state_updater.FREE_PASS_LIMIT
for i in xrange(captcha_limit - 1):
self._test_login_attempt(user=user)
self._test_login_attempt(user=user, expected_state_after='captcha')
self._test_login_attempt(user=user, success=True, expected_state_before='captcha')
for i in xrange(100):
self._test_login_attempt(user=user, success=True)
for i in xrange(free_pass_limit - 1):
self._test_login_attempt(user=user, success=False)
self._test_login_attempt(user=user, success=False, expected_state_after='captcha')
self._test_login_attempt(user=user, success=True, expected_state_before='captcha')
for i in xrange(100):
self._test_login_attempt(user=user, success=True)
for i in xrange(100):
self._test_login_attempt(user=user, success=True, use_session=False, expected_state_before='captcha', expected_state_after='captcha')
for i in xrange(100):
self._test_login_attempt(user=user, success=True)
def test_exponential_block_times(self):
ip = '123.123.123.123'
session_id = 'ef0c812b00128a8255613efdb1cde34052d450d1'
initial_blocking_time = self.state_updater.DEFAULT_CONFIG['initial_blocking_time']
# tests start here
for i in xrange(100):
user = get_random_token()
pwd = get_random_token()
# assume user attempts to login
# assume invalid credentials
self.state_updater.add_request(ip, user, pwd, session_id, success=False)
# show login page, possibly with captcha
def test_still_blocked():
user = get_random_token()
pwd = get_random_token()
# assume user attempts to login
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd, expected_state='block')
# cannot proceed, show login page with blocked message
def test_unblocked():
user = get_random_token()
pwd = get_random_token()
# assume user attempts to login
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd)
# block has expired, proceed to evaluate credentials
# assume invalid credentials
self.state_updater.add_request(ip, user, pwd, session_id, success=False)
response = self.state_checker.check_state(ip, user, pwd, session_id)
self._test_response(response, ip, user, pwd, expected_state='block')
# show login page with blocked message
def test_blocked_during_time(seconds):
now = 0
while now < seconds:
test_still_blocked()
fake_sleep(0.5)
now += 0.5
test_blocked_during_time(initial_blocking_time)
test_unblocked()
test_blocked_during_time(initial_blocking_time * 2)
test_unblocked()
test_blocked_during_time(initial_blocking_time * 4)
test_unblocked()
test_blocked_during_time(initial_blocking_time * 8)
test_unblocked()
def test_expiration_time_counter_ip(self):
self._test_expiration_time_counter_generic('ip', fixed_ip=True)
def test_expiration_time_counter_user(self):
self._test_expiration_time_counter_generic('user', fixed_user=True)
def test_expiration_time_counter_pwd(self):
self._test_expiration_time_counter_generic('pwd', fixed_pwd=True)
def test_expiration_time_counter_ip_user(self):
self._test_expiration_time_counter_generic('ip_user', fixed_ip=True, fixed_user=True)
def test_expiration_time_counter_ip_pwd(self):
self._test_expiration_time_counter_generic('ip_pwd', fixed_ip=True, fixed_pwd=True)
def test_expiration_time_session(self):
user = get_random_token()
session_id = 'ef0c812b00128a8255613efdb1cde34052d450d1'
expiration_time = self.session_storage.DEFAULT_CONFIG['expiration_time']
self._test_login_attempt(user=user, success=True)
throttling_session = self.session_storage.get(session_id)
ok_(throttling_session.has_valid_login(user))
fake_sleep(expiration_time)
throttling_session = self.session_storage.get(session_id)
ok_(throttling_session.has_valid_login(user))
fake_sleep(0.01)
throttling_session = self.session_storage.get(session_id)
ok_(throttling_session is None)
def test_updating_counters_if_using_only_ip(self):
ip = get_random_token()
captcha_limit = self.state_updater.DEFAULT_CONFIG['limits']['captcha']['ip']
block_limit = self.state_updater.DEFAULT_CONFIG['limits']['block']['ip']
for i in xrange(captcha_limit - 1):
self._test_request_attempt(ip=ip)
self._test_request_attempt(ip=ip, expected_state_after='captcha')
if not block_limit:
for i in xrange(200):
self._test_request_attempt(ip=ip, expected_state_before='captcha', expected_state_after='captcha')
else:
for i in xrange(block_limit - captcha_limit - 1):
self._test_request_attempt(ip=ip, expected_state_before='captcha', expected_state_after='captcha')
self._test_request_attempt(ip=ip, expected_state_before='captcha', expected_state_after='block')
for i in xrange(10):
self._test_request_attempt(ip=ip, expected_state_before='block', expected_state_after='block')
def test_multiple_contexts(self):
ip = get_random_token()
captcha_limit = self.state_updater.DEFAULT_CONFIG['limits']['captcha']['ip']
block_limit = self.state_updater.DEFAULT_CONFIG['limits']['block']['ip']
# Context 1
for i in xrange(captcha_limit - 1):
self._test_login_attempt(ip=ip, ctx='1')
self._test_login_attempt(ip=ip, ctx='1', expected_state_after='captcha')
# Context 2
for i in xrange(captcha_limit - 1):
self._test_login_attempt(ip=ip, ctx='2')
self._test_login_attempt(ip=ip, ctx='2', expected_state_after='captcha')
# Context 1
for i in xrange(block_limit - captcha_limit - 1):
self._test_login_attempt(ip=ip, ctx='1', expected_state_before='captcha', expected_state_after='captcha')
self._test_login_attempt(ip=ip, ctx='1', expected_state_before='captcha', expected_state_after='block')
for i in xrange(10):
self._test_login_attempt(ip=ip, ctx='1', expected_state_before='block', expected_state_after='block')
# Context 2
for i in xrange(block_limit - captcha_limit - 1):
self._test_login_attempt(ip=ip, ctx='2', expected_state_before='captcha', expected_state_after='captcha')
self._test_login_attempt(ip=ip, ctx='2', expected_state_before='captcha', expected_state_after='block')
for i in xrange(10):
self._test_login_attempt(ip=ip, ctx='2', expected_state_before='block', expected_state_after='block')
def test_no_captcha(self):
self.state_updater.limits = {
'block': {
'ip': 100,
},
}
ip = get_random_token()
block_limit = self.state_updater.limits['block']['ip']
for i in xrange(block_limit - 1):
self._test_login_attempt(ip=ip)
self._test_login_attempt(ip=ip, expected_state_after='block')
for i in xrange(10):
self._test_login_attempt(ip=ip, expected_state_before='block', expected_state_after='block')
def test_no_block(self):
self.state_updater.limits = {
'captcha': {
'ip': 20,
},
}
ip = get_random_token()
captcha_limit = self.state_updater.limits['captcha']['ip']
for i in xrange(captcha_limit - 1):
self._test_login_attempt(ip=ip)
self._test_login_attempt(ip=ip, expected_state_after='captcha')
for i in xrange(100):
self._test_login_attempt(ip=ip, expected_state_before='captcha', expected_state_after='captcha')
def test_counter_repr():
counter_dict = {'value': 3, 'state': 'block', 'attributes': {'unblock_timestamp': 9999}}
counter = throttling.common.Counter(counter_dict['value'], counter_dict['state'], counter_dict['attributes'])
new_counter_dict = json.loads(repr(counter))
eq_(counter_dict, new_counter_dict)
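# Illustrative sketch (assumption, not securitylib API): test_exponential_block_times
# above exercises a doubling schedule in which each block after an unblock lasts
# twice as long as the previous one. This standalone model mirrors that shape.
def exponential_block_schedule(initial_blocking_time, n_blocks):
    """Return successive block durations: initial, 2x initial, 4x initial, ..."""
    return [initial_blocking_time * (2 ** i) for i in range(n_blocks)]

assert exponential_block_schedule(30, 4) == [30, 60, 120, 240]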
| 44.914352
| 162
| 0.6784
| 2,545
| 19,403
| 4.848723
| 0.075442
| 0.048622
| 0.05316
| 0.063209
| 0.803404
| 0.779335
| 0.737439
| 0.705105
| 0.664182
| 0.639384
| 0
| 0.014986
| 0.222749
| 19,403
| 431
| 163
| 45.018561
| 0.803262
| 0.070968
| 0
| 0.516892
| 1
| 0
| 0.054207
| 0.010073
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101351
| false
| 0.010135
| 0.027027
| 0
| 0.131757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7ab8e9f5a7581e8fccc88af115804fdaca9bb390
| 57,454
|
py
|
Python
|
core/corefgraph/test/test_sentenceCandidateExtractor_test.py
|
malv007/coreference-base
|
5fe41dda0da83a37a01220f8f0552a336f2294ef
|
[
"Apache-2.0"
] | 13
|
2015-08-07T13:14:17.000Z
|
2021-11-08T10:33:51.000Z
|
core/corefgraph/test/test_sentenceCandidateExtractor_test.py
|
malv007/coreference-base
|
5fe41dda0da83a37a01220f8f0552a336f2294ef
|
[
"Apache-2.0"
] | null | null | null |
core/corefgraph/test/test_sentenceCandidateExtractor_test.py
|
malv007/coreference-base
|
5fe41dda0da83a37a01220f8f0552a336f2294ef
|
[
"Apache-2.0"
] | 6
|
2015-02-10T17:00:35.000Z
|
2021-08-04T17:46:13.000Z
|
# coding=utf-8
__author__ = 'Josu Bermudez <josu.bermudez@deusto.es>'
__date__ = '2/4/13'
from ..multisieve.core import SentenceCandidateExtractor
from ..graph.graph_builder import BaseGraphBuilder
from unittest import TestCase
class TestSentenceCandidateExtractor(TestCase):
def setUp(self):
self.graph_builder = BaseGraphBuilder()
self.test_graph = self.graph_builder.new_graph()
self.candidate_extractor = SentenceCandidateExtractor(graph=self.test_graph)
self.root_node = self.graph_builder.add_sentence(self.test_graph, 0, "test_sentence", "test_sentence", 0)
def test_validate_np_node_without_filter(self):
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "O", "NN",
self.root_node)
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"a word is fetched as NP candidate")
test_node = self.graph_builder.add_constituent("an apple", "true", None, "NP", self.test_graph, "mention", True)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"NP candidate is not fetched")
def test_validate_pronouns_node_without_filter(self):
test_node = self.graph_builder.add_constituent("an apple", "true", None, "PRP", self.test_graph, "mention",
"true")
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"NP candidate is fetched as PRP")
# pronoun_pos = ("PRP", "PRP$", "WP", "WP$")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "O", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"PRP not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "O", "PRP$",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"PRP$ not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "O", "WP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"WP not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "O", "WP$",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"WP$ not fetched as candidate")
def test_validate_ne_node_without_filter(self):
test_node = self.graph_builder.add_constituent("an apple", "true", None, "VP", self.test_graph, "mention",
"true")
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"VP candidate is fetched")
#"PERSON", "NORP", "FACILITY", "ORGANIZATION", "GPE", "LOCATION", "PRODUCT", "EVENT", "WORK OF ART","LAW",
# "LANGUAGE", "DATE", "TIME"
test_node = self.graph_builder.add_constituent("an apple", "true", "PERSON", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"PERSON chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "NORP", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"NORP chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "FACILITY", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"FACILITY chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "GPE", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"GPE chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "LOCATION", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"LOCATION chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "PRODUCT", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"PRODUCT chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "EVENT", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"EVENT chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "WORK OF ART", "VP", self.test_graph,
"mention", "true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"WORK OF ART chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "LAW", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"LAW chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "LANGUAGE", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"LANGUAGE chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "DATE", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"DATE chunk candidate is not fetched")
test_node = self.graph_builder.add_constituent("an apple", "true", "TIME", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"TIME chunk candidate is not fetched")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "O", "NN",
self.root_node)
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"a word is fetched as NE candidate")
#"PERSON", "NORP", "FACILITY", "ORGANIZATION", "GPE", "LOCATION", "PRODUCT", "EVENT", "WORK OF ART","LAW",
# "LANGUAGE", "DATE", "TIME"
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "PERSON", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"PERSON WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "NORP", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"NORP WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "FACILITY", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"FACILITY WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "ORGANIZATION",
"PRP", self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"ORGANIZATION WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "GPE", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"GPE WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "LOCATION", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"LOCATION WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "PRODUCT", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"PRODUCT WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "EVENT", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"EVENT WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "WORK OF ART",
"PRP", self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"WORK OF ART WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "LAW", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"LAW WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "LANGUAGE", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"LANGUAGE WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "DATE", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"DATE WORD not fetched as candidate")
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "TIME", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node, filter_candidates=False),
"TIME WORD not fetched as candidate")
def test_validate_node_filter_stopwords(self):
STOPWORDS = ("there", 'ltd.', 'etc', "'s", 'hmm')
test_node = self.graph_builder.add_constituent("an apple", "true", "PERSON", "VP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node),
"PERSON chunk candidate is not fetched")
for form in STOPWORDS:
test_node = self.graph_builder.add_constituent(form, "true", "PERSON", "VP", self.test_graph, "mention",
"true")
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node),
"NE candidate with {0} form is fetched".format(form))
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "O", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node),
"PRP not fetched as candidate")
for form in STOPWORDS:
test_node = self.graph_builder.add_word(form, self.test_graph, "1", "apple", "apple", "O", "PRP",
self.root_node)
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node),
"PRP candidate with {0} form is fetched ".format(form))
test_node = self.graph_builder.add_constituent("an apple", "true", None, "NP", self.test_graph, "mention",
"true")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node),
"NP candidate is not fetched")
for form in STOPWORDS:
test_node = self.graph_builder.add_constituent(form, "true", None, "NP", self.test_graph, "mention", "true")
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node),
"NP candidate with {0} is fetched".format(form))
def test_validate_node_filter_larger_mention(self):
head_node = self.graph_builder.add_constituent("an apple", True, "PERSON", "VP", self.test_graph, "mention",
"no_head")
no_head_node = self.graph_builder.add_constituent("an apple", False, "PERSON", "VP", self.test_graph, "mention",
"head")
parent_node = self.graph_builder.add_constituent("an apple", True, "PERSON", "VP", self.test_graph, "mention",
"parent")
self.graph_builder.link_syntax_non_terminal(parent=parent_node, child=head_node)
self.graph_builder.link_syntax_non_terminal(parent=parent_node, child=no_head_node)
self.candidate_extractor._set_mention_type(parent_node, self.candidate_extractor.proper_mention)
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=head_node),
"head of a mention is fetched")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=no_head_node),
"no head of a mention is not fetched")
def test_validate_node_filter_invalid_ner(self):
INVALIDS_NER = ("PERCENT", "MONEY", "QUANTITY", "ORDINAL", "CARDINAL")
test_node = self.graph_builder.add_constituent("an apple", "true", "PERSON", "NP", self.test_graph, "mention",
True)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node),
"PERSON chunk candidate is not fetched")
for ner in INVALIDS_NER:
test_node = self.graph_builder.add_constituent("an apple", "true", ner, "NP", self.test_graph, "mention",
True)
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node),
"NP candidate with invalid NE{0} is fetched".format(ner))
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", "O", "PRP",
self.root_node)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node),
"PRP not fetched as candidate")
for ner in INVALIDS_NER:
test_node = self.graph_builder.add_word("an apple", self.test_graph, "1", "apple", "apple", ner, "PRP",
self.root_node)
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node),
"PRP candidate with invalid NE({0}) form is fetched ".format(ner))
def test_validate_node_filter_quantifier_or_partitive_expressions(self):
test_node = self.graph_builder.add_constituent("apples", "true", None, "NP", self.test_graph, "mention", True)
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node),
"NP candidate is not fetched")
test_node = self.graph_builder.add_constituent("million of apples", True, None, "NP", self.test_graph,
"mention", "millions of apples")
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node),
"partitive candidate is fetched")
test_node = self.graph_builder.add_constituent("any apples", True, None, "NP", self.test_graph, "mention",
"millions of apples")
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node),
"quantifier candidate is fetched")
def test_validate_node_filter_pleonastic_it(self):
self.fail("TODO")
def test_validate_node_filter_nations_acronyms(self):
self.fail("TODO")
def test_validate_node_filter_nationality(self):
test_node = self.graph_builder.add_constituent("apples", True, None, "NP", self.test_graph, "mention", "apples")
self.assertTrue(self.candidate_extractor._validate_node(mention_candidate=test_node),
"NP candidate is not fetched")
test_node = self.graph_builder.add_constituent("American", True, None, "NP", self.test_graph, "mention",
"millions of apples")
self.assertFalse(self.candidate_extractor._validate_node(mention_candidate=test_node),
"nationality candidate is fetched")
def test_skip_root(self):
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "ROOT", "SentenceTest", 1)
conll_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "ROOT", "SentenceTest", 1)
s_chunk = self.graph_builder.add_constituent("the way of samurai", True, None, "S", self.test_graph, "s",
"the way of samurai")
root_chunk = self.graph_builder.add_constituent("dummyRoot", True, None, "ROOT", self.test_graph, "dummyroot",
"dummyRoot")
plain_chunk = self.graph_builder.add_constituent("apples", True, None, "NP", self.test_graph, "mention",
"apples")
self.graph_builder.link_syntax_non_terminal(root_chunk, s_chunk)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, root_chunk)
self.graph_builder.link_syntax_non_terminal(conll_sentence_root, s_chunk)
self.graph_builder.link_syntax_non_terminal(s_chunk, plain_chunk)
self.assertEqual(s_chunk, self.candidate_extractor._skip_root(stanford_sentence_root), "root not skipped")
self.assertEqual(s_chunk, self.candidate_extractor._skip_root(conll_sentence_root), "root not skipped")
self.assertEqual(s_chunk, self.candidate_extractor._skip_root(s_chunk), "no ROOT chunk skipped")
def test_get_syntactic_parent(self):
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "ROOT", "SentenceTest", 1)
root_chunk = self.graph_builder.add_constituent("dummyRoot", True, None, "ROOT", self.test_graph, "dummyroot",
"dummyRoot")
s_chunk = self.graph_builder.add_constituent("the way of samurai", True, None, "S", self.test_graph, "s",
"the way of samurai")
plain_chunk = self.graph_builder.add_constituent("apples", True, None, "NP", self.test_graph, "mention",
"apples")
self.graph_builder.link_syntax_non_terminal(s_chunk, plain_chunk)
self.graph_builder.link_syntax_non_terminal(root_chunk, s_chunk)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, root_chunk)
self.assertEqual(s_chunk, self.candidate_extractor.get_syntactic_parent(plain_chunk),
"No direct parent fetched")
self.assertIsNone(self.candidate_extractor.get_syntactic_parent(stanford_sentence_root),
"Parent fetched for root")
def test_get_syntactic_children(self):
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "ROOT", "SentenceTest", 1)
root_chunk = self.graph_builder.add_constituent("dummyRoot", True, None, "ROOT", self.test_graph, "dummyroot",
"dummyRoot")
s_chunk = self.graph_builder.add_constituent("the way of samurai", True, None, "S", self.test_graph, "s",
"the way of samurai")
plain_chunk = self.graph_builder.add_constituent("apples", True, None, "NP", self.test_graph, "mention",
"apples")
self.graph_builder.link_syntax_non_terminal(s_chunk, plain_chunk)
self.graph_builder.link_syntax_non_terminal(root_chunk, s_chunk)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, root_chunk)
self.assertListEqual([root_chunk], self.candidate_extractor.get_syntactic_children(stanford_sentence_root),
"No children fetched")
self.assertListEqual([], self.candidate_extractor.get_syntactic_children(plain_chunk),
"Parent fetched for leaf")
def test_order_constituent_simple(self):
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "DummyRoot", "SentenceTest", 1)
s_chunk = self.graph_builder.add_constituent("He played a song", False, None, "S", self.test_graph,
"S He played a song", "he played a song")
he_NP_chunk = self.graph_builder.add_constituent("He", True, None, "NP", self.test_graph, "NP he", "he")
he_PRP_word = self.graph_builder.add_word("He", self.test_graph, "word_O",
"PRP He", "he", "O", "PRP", stanford_sentence_root)
self.graph_builder.set_head(he_PRP_word)
played_VP_chunk = self.graph_builder.add_constituent("played", True, None, "VP", self.test_graph, "VP played",
"played")
played_VBD_word = self.graph_builder.add_word("played", self.test_graph, "word_1",
"VBD played", "played", "O", "VBD", stanford_sentence_root)
a_new_song_NP_chunk = self.graph_builder.add_constituent("a new song", False, None, "NP", self.test_graph,
"NP a new song", "a new song")
a_DET_word = self.graph_builder.add_word("a", self.test_graph, "word_1",
"DET a", "a", "O", "DET", stanford_sentence_root)
new_JJ_word = self.graph_builder.add_word("new", self.test_graph, "word_2",
"JJ new", "new", "O", "JJ", stanford_sentence_root)
song_NN_word = self.graph_builder.add_word("song", self.test_graph, "word_3",
"NN song", "song", "O", "NN", stanford_sentence_root)
point_word = self.graph_builder.add_word(".", self.test_graph, "word_4",
". .", ".", "O", ".", stanford_sentence_root)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, s_chunk)
self.graph_builder.link_syntax_non_terminal(he_NP_chunk, he_PRP_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, he_NP_chunk)
self.graph_builder.set_head(played_VBD_word)
self.graph_builder.link_syntax_non_terminal(played_VP_chunk, played_VBD_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, played_VP_chunk)
self.graph_builder.link_syntax_non_terminal(s_chunk, a_new_song_NP_chunk)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, a_DET_word)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, new_JJ_word)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, song_NN_word)
self.graph_builder.set_head(song_NN_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, point_word)
candidatures, next_sentence_candidates = self.candidate_extractor.process_sentence(stanford_sentence_root, [])
self.assertEqual(len(candidatures), 2)
self.assertListEqual(candidatures, [([he_NP_chunk], []), ([a_new_song_NP_chunk], [he_NP_chunk])])
def test_order_constituent_double(self):
next_sentence_candidates = []
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "DummyRoot", "SentenceTest", 1)
s_chunk = self.graph_builder.add_constituent("John is a musician", False, None, "S", self.test_graph,
"S Jhon is a musician", "jhon is a musician")
#(NP (NNP John))
john_NP_chunk = self.graph_builder.add_constituent("John", True, "PERSON", "NP", self.test_graph, "NP John",
"john")
john_NNP_word = self.graph_builder.add_word("John", self.test_graph, "word_O",
"NNP John", "john", "PERSON", "NNP", stanford_sentence_root)
#(VP (VBZ is)
is_VP_chunk = self.graph_builder.add_constituent("is", True, None, "VP", self.test_graph, "VP is", "is")
is_VBZ_word = self.graph_builder.add_word("is", self.test_graph, "word_1",
"VBZ is", "be", "O", "VBZ", stanford_sentence_root)
#(NP (DT a) (NN musician)))
a_musician_NP_chunk = self.graph_builder.add_constituent("a musician", False, None, "NP", self.test_graph,
"NP a musician", "a musician")
a_DET_word = self.graph_builder.add_word("a", self.test_graph, "word_2",
"DET a", "a", "O", "DET", stanford_sentence_root)
musician_NN_word = self.graph_builder.add_word("musician", self.test_graph, "word_3",
"NN musician", "musician", "O", "NN", stanford_sentence_root)
#(. .)
point_word = self.graph_builder.add_word(".", self.test_graph, "word_4",
". .", ".", "O", ".", stanford_sentence_root)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, s_chunk)
self.graph_builder.link_syntax_non_terminal(john_NP_chunk, john_NNP_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, john_NP_chunk)
self.graph_builder.set_head(john_NNP_word)
self.graph_builder.link_syntax_non_terminal(is_VP_chunk, is_VBZ_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, is_VP_chunk)
self.graph_builder.set_head(is_VBZ_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, a_musician_NP_chunk)
self.graph_builder.link_syntax_non_terminal(a_musician_NP_chunk, a_DET_word)
self.graph_builder.link_syntax_non_terminal(a_musician_NP_chunk, musician_NN_word)
self.graph_builder.set_head(musician_NN_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, point_word)
candidatures, next_sentence_candidates = self.candidate_extractor.process_sentence(
stanford_sentence_root, next_sentence_candidates)
self.assertEqual(len(candidatures), 2)
self.assertListEqual(candidatures,
[([john_NP_chunk], []),
([a_musician_NP_chunk], [john_NP_chunk]),
])
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "DummyRoot", "SentenceTest", 1)
s_chunk = self.graph_builder.add_constituent("He played a song", False, None, "S", self.test_graph,
"S He played a song", "he played a song")
he_NP_chunk = self.graph_builder.add_constituent("He", True, None, "NP", self.test_graph, "NP he", "he")
he_PRP_word = self.graph_builder.add_word("He", self.test_graph, "word_O",
"PRP He", "he", "O", "PRP", stanford_sentence_root)
played_VP_chunk = self.graph_builder.add_constituent("played", True, None, "VP", self.test_graph, "VP played",
"played")
played_VBD_word = self.graph_builder.add_word("played", self.test_graph, "word_1",
"VBD played", "played", "O", "VBD", stanford_sentence_root)
a_new_song_NP_chunk = self.graph_builder.add_constituent("a new song", False, None, "NP", self.test_graph,
"NP a new song", "a new song")
a_DET_word = self.graph_builder.add_word("a", self.test_graph, "word_1",
"DET a", "a", "O", "DET", stanford_sentence_root)
new_JJ_word = self.graph_builder.add_word("new", self.test_graph, "word_2",
"JJ new", "new", "O", "JJ", stanford_sentence_root)
song_NN_word = self.graph_builder.add_word("song", self.test_graph, "word_3",
"NN song", "song", "O", "NN", stanford_sentence_root)
point_word = self.graph_builder.add_word(".", self.test_graph, "word_4",
". .", ".", "O", ".", stanford_sentence_root)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, s_chunk)
self.graph_builder.link_syntax_non_terminal(he_NP_chunk, he_PRP_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, he_NP_chunk)
self.graph_builder.set_head(he_PRP_word)
self.graph_builder.link_syntax_non_terminal(played_VP_chunk, played_VBD_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, played_VP_chunk)
self.graph_builder.set_head(played_VBD_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, a_new_song_NP_chunk)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, a_DET_word)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, new_JJ_word)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, song_NN_word)
self.graph_builder.set_head(song_NN_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, point_word)
candidatures, next_sentence_candidates = self.candidate_extractor.process_sentence(
stanford_sentence_root, next_sentence_candidates)
self.assertEqual(len(candidatures), 2)
self.assertListEqual(candidatures, [([he_NP_chunk], [john_NP_chunk, a_musician_NP_chunk]),
([a_new_song_NP_chunk], [he_NP_chunk, john_NP_chunk, a_musician_NP_chunk]),
])
def test_order_constituent_full(self):
next_sentence_candidates = []
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "DummyRoot", "SentenceTest", 1)
s_chunk = self.graph_builder.add_constituent("John is a musician", False, None, "S", self.test_graph,
"S John is a musician", "john is a musician")
#(NP (NNP John))
john_NP_chunk = self.graph_builder.add_constituent("John", True, "PERSON", "NP", self.test_graph, "NP John",
"john")
john_NNP_word = self.graph_builder.add_word("John", self.test_graph, "word_O",
"NNP John", "john", "PERSON", "NNP", stanford_sentence_root)
#(VP (VBZ is)
is_VP_chunk = self.graph_builder.add_constituent("is", True, None, "VP", self.test_graph, "VP is", "is")
is_VBZ_word = self.graph_builder.add_word("is", self.test_graph, "word_1",
"VBZ is", "be", "O", "VBZ", stanford_sentence_root)
#(NP (DT a) (NN musician)))
a_musician_NP_chunk = self.graph_builder.add_constituent("a musician", False, None, "NP", self.test_graph,
"NP a musician", "a musician")
a_DT_word = self.graph_builder.add_word("a", self.test_graph, "word_2",
"DT a", "a", "O", "DT", stanford_sentence_root)
musician_NN_word = self.graph_builder.add_word("musician", self.test_graph, "word_3",
"NN musician", "musician", "O", "NN", stanford_sentence_root)
#(. .)
point_word = self.graph_builder.add_word(".", self.test_graph, "word_4",
". .", ".", "O", ".", stanford_sentence_root)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, s_chunk)
self.graph_builder.link_syntax_non_terminal(john_NP_chunk, john_NNP_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, john_NP_chunk)
self.graph_builder.set_head(john_NNP_word)
self.graph_builder.link_syntax_non_terminal(is_VP_chunk, is_VBZ_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, is_VP_chunk)
self.graph_builder.set_head(is_VBZ_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, a_musician_NP_chunk)
self.graph_builder.link_syntax_non_terminal(a_musician_NP_chunk, a_DT_word)
self.graph_builder.link_syntax_non_terminal(a_musician_NP_chunk, musician_NN_word)
self.graph_builder.set_head(musician_NN_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, point_word)
candidatures, next_sentence_candidates = self.candidate_extractor.process_sentence(
stanford_sentence_root, next_sentence_candidates)
self.assertEqual(len(candidatures), 2)
self.assertListEqual(candidatures,
[([john_NP_chunk], []),
([a_musician_NP_chunk], [john_NP_chunk]),
])
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "DummyRoot", "SentenceTest", 1)
s_chunk = self.graph_builder.add_constituent("He played a song", False, None, "S", self.test_graph,
"S He played a song", "he played a song")
he_NP_chunk = self.graph_builder.add_constituent("He", True, None, "NP", self.test_graph, "NP he", "he")
he_PRP_word = self.graph_builder.add_word("He", self.test_graph, "word_O",
"PRP He", "he", "O", "PRP", stanford_sentence_root)
played_VP_chunk = self.graph_builder.add_constituent("played", True, None, "VP", self.test_graph, "VP played",
"played")
played_VBD_word = self.graph_builder.add_word("played", self.test_graph, "word_1",
"VBD played", "played", "O", "VBD", stanford_sentence_root)
a_new_song_NP_chunk = self.graph_builder.add_constituent("a new song", False, None, "NP", self.test_graph,
"NP a new song", "a new song")
a_DT_word = self.graph_builder.add_word("a", self.test_graph, "word_1",
"DT a", "a", "O", "DT", stanford_sentence_root)
new_JJ_word = self.graph_builder.add_word("new", self.test_graph, "word_2",
"JJ new", "new", "O", "JJ", stanford_sentence_root)
song_NN_word = self.graph_builder.add_word("song", self.test_graph, "word_3",
"NN song", "song", "O", "NN", stanford_sentence_root)
point_word = self.graph_builder.add_word(".", self.test_graph, "word_4",
". .", ".", "O", ".", stanford_sentence_root)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, s_chunk)
self.graph_builder.link_syntax_non_terminal(he_NP_chunk, he_PRP_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, he_NP_chunk)
self.graph_builder.set_head(he_PRP_word)
self.graph_builder.link_syntax_non_terminal(played_VP_chunk, played_VBD_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, played_VP_chunk)
self.graph_builder.set_head(played_VBD_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, a_new_song_NP_chunk)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, a_DT_word)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, new_JJ_word)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, song_NN_word)
self.graph_builder.set_head(song_NN_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, point_word)
candidatures, next_sentence_candidates = self.candidate_extractor.process_sentence(
stanford_sentence_root, next_sentence_candidates)
self.assertEqual(len(candidatures), 2)
self.assertListEqual(candidatures, [([he_NP_chunk], [john_NP_chunk, a_musician_NP_chunk]),
([a_new_song_NP_chunk], [he_NP_chunk, john_NP_chunk, a_musician_NP_chunk]),
])
# ROOT (S
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "DummyRoot", "SentenceTest", 1)
s_chunk = self.graph_builder.add_constituent("A girl was listening to the song", False, None, "S",
self.test_graph, "S A girl was listening to the song",
"A girl was listening to the song")
# (NP (DT A) (NN girl))
a_girl_NP_chunk = self.graph_builder.add_constituent("a girl", False, None, "NP", self.test_graph, "NP a girl",
"a girl")
a_DT_word = self.graph_builder.add_word("a", self.test_graph, "word_2",
"DT a", "a", "O", "DT", stanford_sentence_root)
girl_NN_word = self.graph_builder.add_word("girl", self.test_graph, "word_3",
"NN girl", "girl", "O", "NN", stanford_sentence_root)
# (VP (VBD was) (VP (VBG listening)
was_listening_VP_chunk = self.graph_builder.add_constituent("was listening", True, None, "VP", self.test_graph,
"VP was listening", "be listen")
was_VBD_word = self.graph_builder.add_word("was", self.test_graph, "word_1",
"VBD is", "be", "O", "VBD", stanford_sentence_root)
listening_VBG_word = self.graph_builder.add_word("listening", self.test_graph, "word_1",
"VBG listening", "listen", "O", "VBG", stanford_sentence_root)
# (PP (TO to) (NP (DT the) (NN song)))))
to_the_song_PP_chunk = self.graph_builder.add_constituent("to the song", False, None, "PP", self.test_graph,
"PP to the song", "to the song")
to_TO_word = self.graph_builder.add_word("to", self.test_graph, "word_2",
"TO to", "to", "O", "TO", stanford_sentence_root)
the_song_NP_chunk = self.graph_builder.add_constituent("the song", False, None, "NP", self.test_graph,
"NP the song", "the song")
the_DT_word = self.graph_builder.add_word("the", self.test_graph, "word_2",
"DT the", "the", "O", "DT", stanford_sentence_root)
song_NN_word = self.graph_builder.add_word("song", self.test_graph, "word_3",
"NN song", "song", "O", "DT", stanford_sentence_root)
# (. .)
point_word = self.graph_builder.add_word(".", self.test_graph, "word_4",
". .", ".", "O", ".", stanford_sentence_root)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, s_chunk)
self.graph_builder.link_syntax_non_terminal(a_girl_NP_chunk, a_DT_word)
self.graph_builder.link_syntax_non_terminal(a_girl_NP_chunk, girl_NN_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, a_girl_NP_chunk)
self.graph_builder.set_head(girl_NN_word)
self.graph_builder.link_syntax_non_terminal(was_listening_VP_chunk, was_VBD_word)
self.graph_builder.link_syntax_non_terminal(was_listening_VP_chunk, listening_VBG_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, was_listening_VP_chunk)
self.graph_builder.set_head(listening_VBG_word)
self.graph_builder.link_syntax_non_terminal(the_song_NP_chunk, the_DT_word)
self.graph_builder.link_syntax_non_terminal(the_song_NP_chunk, song_NN_word)
self.graph_builder.link_syntax_non_terminal(to_the_song_PP_chunk, the_song_NP_chunk)
self.graph_builder.set_head(song_NN_word)
self.graph_builder.link_syntax_non_terminal(to_the_song_PP_chunk, to_TO_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, to_the_song_PP_chunk)
self.graph_builder.set_head(the_song_NP_chunk)
self.graph_builder.link_syntax_non_terminal(s_chunk, point_word)
self.graph_builder.set_head(was_listening_VP_chunk)
candidatures, next_sentence_candidates = self.candidate_extractor.process_sentence(
stanford_sentence_root, next_sentence_candidates)
self.assertEqual(len(candidatures), 2)
self.assertListEqual(candidatures,
[([a_girl_NP_chunk], [he_NP_chunk, a_new_song_NP_chunk, john_NP_chunk, a_musician_NP_chunk
]),
([the_song_NP_chunk],
[a_girl_NP_chunk, he_NP_chunk, a_new_song_NP_chunk, john_NP_chunk, a_musician_NP_chunk
]),
])
#(ROOT (S
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "DummyRoot", "SentenceTest", 1)
s_chunk = self.graph_builder.add_constituent('"It is my favorite", John said to her.', False, None, "S",
self.test_graph, 'S "It is my favorite", John said to her.',
'"It is my favorite", John said to her.')
# (S
inner_s_chunk = self.graph_builder.add_constituent('"It is my favorite"', False, None, "S", self.test_graph,
'S "It is my favorite"', '"It is my favorite"')
# (`` ``)
open_word = self.graph_builder.add_word("``", self.test_graph, "word_4",
"`` ``", "``", "O", ",", stanford_sentence_root)
# (NP (PRP It))
it_NP_chunk = self.graph_builder.add_constituent("It", True, None, "NP", self.test_graph, "NP It", "it")
it_PRP_word = self.graph_builder.add_word("It", self.test_graph, "word_O",
"PRP It", "it", "O", "PRP", stanford_sentence_root)
# (VP (VBZ is)
is_VP_chunk = self.graph_builder.add_constituent("is", True, None, "VP", self.test_graph, "VP is", "is")
is_VBZ_word = self.graph_builder.add_word("is", self.test_graph, "word_1",
"VBZ is", "be", "O", "VBZ", stanford_sentence_root)
# (NP (PRP$ my) (JJ favorite)))
my_favorite_NP_chunk = self.graph_builder.add_constituent("my favorite", False, None, "NP", self.test_graph,
"NP my favorite", "my favorite")
my_PRP_word = self.graph_builder.add_word("my", self.test_graph, "word_2",
"PRP$ my", "my", "O", "PRP$", stanford_sentence_root)
favorite_JJ_word = self.graph_builder.add_word("favorite", self.test_graph, "word_3",
"JJ favorite", "favorite", "O", "JJ", stanford_sentence_root)
# ('' '')
close_word = self.graph_builder.add_word("''", self.test_graph, "word_4",
"'' ''", "''", "O", ",", stanford_sentence_root)
# )
# (, ,)
coma_word = self.graph_builder.add_word(",", self.test_graph, "word_4",
", ,", ",", "O", ",", stanford_sentence_root)
# (NP (NNP John))
john_NP_chunk = self.graph_builder.add_constituent("John", True, "PERSON", "NP", self.test_graph, "NP John",
"john")
john_NNP_word = self.graph_builder.add_word("John", self.test_graph, "word_O",
"NNP John", "john", "PERSON", "NNP", stanford_sentence_root)
# (VP (VBD said)
said_VP_chunk = self.graph_builder.add_constituent("said", True, None, "VP", self.test_graph, "VP said", "said")
said_VBD_word = self.graph_builder.add_word("said", self.test_graph, "word_1",
"VBD said", "say", "O", "VBD", stanford_sentence_root)
# (PP (TO to) (NP (PRP her))))
to_her_PP_chunk = self.graph_builder.add_constituent("to her", False, None, "PP", self.test_graph, "PP to her",
"to the song")
to_TO_word = self.graph_builder.add_word("to", self.test_graph, "word_2",
"TO to", "to", "O", "TO", stanford_sentence_root)
her_NP_chunk = self.graph_builder.add_constituent("her", False, None, "NP", self.test_graph, "NP her", "her")
her_PRP_word = self.graph_builder.add_word("her", self.test_graph, "word_2",
"PRP her", "her", "O", "PRP", stanford_sentence_root)
# (. .))
point_word = self.graph_builder.add_word(".", self.test_graph, "word_4",
". .", ".", "O", ".", stanford_sentence_root)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, s_chunk)
self.graph_builder.link_syntax_non_terminal(inner_s_chunk, open_word)
self.graph_builder.link_syntax_non_terminal(it_NP_chunk, it_PRP_word)
self.graph_builder.link_syntax_non_terminal(inner_s_chunk, it_NP_chunk)
self.graph_builder.set_head(it_PRP_word)
        self.graph_builder.link_syntax_non_terminal(is_VP_chunk, is_VBZ_word)
        self.graph_builder.link_syntax_non_terminal(inner_s_chunk, is_VP_chunk)
self.graph_builder.set_head(is_VBZ_word)
self.graph_builder.link_syntax_non_terminal(my_favorite_NP_chunk, my_PRP_word)
self.graph_builder.link_syntax_non_terminal(my_favorite_NP_chunk, favorite_JJ_word)
self.graph_builder.link_syntax_non_terminal(inner_s_chunk, my_favorite_NP_chunk)
self.graph_builder.set_head(favorite_JJ_word)
self.graph_builder.link_syntax_non_terminal(inner_s_chunk, close_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, inner_s_chunk)
self.graph_builder.link_syntax_non_terminal(s_chunk, coma_word)
self.graph_builder.link_syntax_non_terminal(john_NP_chunk, john_NNP_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, john_NP_chunk)
self.graph_builder.set_head(john_NNP_word)
self.graph_builder.link_syntax_non_terminal(said_VP_chunk, said_VBD_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, said_VP_chunk)
self.graph_builder.set_head(said_VBD_word)
self.graph_builder.link_syntax_non_terminal(her_NP_chunk, her_PRP_word)
self.graph_builder.link_syntax_non_terminal(to_her_PP_chunk, her_NP_chunk)
self.graph_builder.set_head(her_PRP_word)
self.graph_builder.link_syntax_non_terminal(to_her_PP_chunk, to_TO_word)
self.graph_builder.set_head(her_NP_chunk)
self.graph_builder.link_syntax_non_terminal(s_chunk, to_her_PP_chunk)
self.graph_builder.link_syntax_non_terminal(s_chunk, point_word)
self.graph_builder.set_head(said_VP_chunk)
candidatures, next_sentence_candidates = self.candidate_extractor.process_sentence(
stanford_sentence_root, next_sentence_candidates)
        self.assertEqual(len(candidatures), 5)
self.assertListEqual(candidatures, [([it_NP_chunk], [a_girl_NP_chunk, the_song_NP_chunk, he_NP_chunk,
a_new_song_NP_chunk, john_NP_chunk, a_musician_NP_chunk]),
([my_favorite_NP_chunk], [it_NP_chunk, a_girl_NP_chunk, the_song_NP_chunk,
he_NP_chunk, a_new_song_NP_chunk, john_NP_chunk,
a_musician_NP_chunk]),
([my_PRP_word], [it_NP_chunk, my_favorite_NP_chunk, a_girl_NP_chunk,
the_song_NP_chunk, he_NP_chunk, a_new_song_NP_chunk,
john_NP_chunk, a_musician_NP_chunk]),
([john_NP_chunk], [it_NP_chunk, my_favorite_NP_chunk, my_PRP_word,
a_girl_NP_chunk, the_song_NP_chunk, he_NP_chunk,
a_new_song_NP_chunk, john_NP_chunk,
a_musician_NP_chunk]),
([her_NP_chunk], [john_NP_chunk, it_NP_chunk, my_favorite_NP_chunk,
my_PRP_word, a_girl_NP_chunk, the_song_NP_chunk,
he_NP_chunk, a_new_song_NP_chunk, john_NP_chunk,
a_musician_NP_chunk])
])
def test_process_sentence_simple(self):
stanford_sentence_root = self.graph_builder.add_sentence(self.test_graph, 0, "DummyRoot", "SentenceTest", 1)
s_chunk = self.graph_builder.add_constituent("He played a song", False, None, "S", self.test_graph,
"S He played a song", "he played a song")
he_NP_chunk = self.graph_builder.add_constituent("He", True, None, "NP", self.test_graph, "NP he", "he")
he_PRP_word = self.graph_builder.add_word("He", self.test_graph, "word_O",
"PRP He", "he", "O", "PRP", stanford_sentence_root)
self.graph_builder.set_head(he_PRP_word)
played_VP_chunk = self.graph_builder.add_constituent("played", True, None, "VP", self.test_graph, "VP played",
"played")
played_VBD_word = self.graph_builder.add_word("played", self.test_graph, "word_1",
"VBD played", "played", "O", "VBD", stanford_sentence_root)
a_new_song_NP_chunk = self.graph_builder.add_constituent("a new song", False, None, "NP", self.test_graph,
"NP a new song", "a new song")
a_DET_word = self.graph_builder.add_word("a", self.test_graph, "word_1",
"DET a", "a", "O", "DET", stanford_sentence_root)
new_JJ_word = self.graph_builder.add_word("new", self.test_graph, "word_2",
"JJ new", "new", "O", "JJ", stanford_sentence_root)
song_NN_word = self.graph_builder.add_word("song", self.test_graph, "word_3",
"NN song", "song", "O", "NN", stanford_sentence_root)
point_word = self.graph_builder.add_word(".", self.test_graph, "word_4",
". .", ".", "O", ".", stanford_sentence_root)
self.graph_builder.link_syntax_non_terminal(stanford_sentence_root, s_chunk)
self.graph_builder.link_syntax_non_terminal(he_NP_chunk, he_PRP_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, he_NP_chunk)
self.graph_builder.set_head(played_VBD_word)
self.graph_builder.link_syntax_non_terminal(played_VP_chunk, played_VBD_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, played_VP_chunk)
self.graph_builder.link_syntax_non_terminal(s_chunk, a_new_song_NP_chunk)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, a_DET_word)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, new_JJ_word)
self.graph_builder.link_syntax_non_terminal(a_new_song_NP_chunk, song_NN_word)
self.graph_builder.set_head(song_NN_word)
self.graph_builder.link_syntax_non_terminal(s_chunk, point_word)
candidatures, next_sentence_candidates = self.candidate_extractor.process_sentence(stanford_sentence_root, [])
self.assertEqual(len(candidatures), 2)
self.assertListEqual(candidatures, [([he_NP_chunk], []), ([a_new_song_NP_chunk], [he_NP_chunk])])
def test_process(self):
self.fail("TODO")
| 68.889688
| 120
| 0.59914
| 6,788
| 57,454
| 4.687537
| 0.027401
| 0.114648
| 0.152362
| 0.099123
| 0.940696
| 0.92825
| 0.909394
| 0.876835
| 0.857821
| 0.846193
| 0
| 0.002798
| 0.297177
| 57,454
| 833
| 121
| 68.972389
| 0.785191
| 0.012897
| 0
| 0.622527
| 0
| 0
| 0.11267
| 0.000441
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.028919
| false
| 0
| 0.004566
| 0
| 0.035008
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8fab177fcde6d105f3c6cb4859ee05acb21984df
| 101
|
py
|
Python
|
ScheduledDeliveryWebApplication/app/validators/email_validator.py
|
leitao-bcc/MovileNext3_Backend_LucasLeitao
|
15bdd8a96711a2e305078cd2f152b86374dbe276
|
[
"Unlicense"
] | null | null | null |
ScheduledDeliveryWebApplication/app/validators/email_validator.py
|
leitao-bcc/MovileNext3_Backend_LucasLeitao
|
15bdd8a96711a2e305078cd2f152b86374dbe276
|
[
"Unlicense"
] | null | null | null |
ScheduledDeliveryWebApplication/app/validators/email_validator.py
|
leitao-bcc/MovileNext3_Backend_LucasLeitao
|
15bdd8a96711a2e305078cd2f152b86374dbe276
|
[
"Unlicense"
] | null | null | null |
from re import match
def is_valid_email(var):
return match(r"[^@]+@[^@]+\.[^@]+", var) != None
| 16.833333
| 52
| 0.554455
| 14
| 101
| 3.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168317
| 101
| 5
| 53
| 20.2
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0.178218
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8fb1241c6dc836f1dcf41866e81c183d54c4c599
| 43
|
py
|
Python
|
test_folder/test_file.py
|
toonNvk/flask-test
|
c5a608e769abc8f57909dce3097956f31ec3b71d
|
[
"bzip2-1.0.6"
] | null | null | null |
test_folder/test_file.py
|
toonNvk/flask-test
|
c5a608e769abc8f57909dce3097956f31ec3b71d
|
[
"bzip2-1.0.6"
] | null | null | null |
test_folder/test_file.py
|
toonNvk/flask-test
|
c5a608e769abc8f57909dce3097956f31ec3b71d
|
[
"bzip2-1.0.6"
] | null | null | null |
def testfunction():
print("test function")
| 21.5
| 23
| 0.744186
| 5
| 43
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 2
| 23
| 21.5
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0.295455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
8fcf7011c611d1d07433a167d2fd772d3ffb1102
| 2,580
|
py
|
Python
|
tests/test_searcher_noncollinear.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 67
|
2015-01-31T07:44:55.000Z
|
2022-03-21T21:43:34.000Z
|
tests/test_searcher_noncollinear.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 13
|
2016-06-03T19:07:51.000Z
|
2022-03-31T04:20:40.000Z
|
tests/test_searcher_noncollinear.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 37
|
2015-01-22T15:37:23.000Z
|
2022-03-21T15:38:10.000Z
|
import os
import unittest
from pychemia import pcm_log, HAS_PYMONGO
from pychemia.population import NonCollinearMagMoms
from pychemia.searcher import HarmonySearch, FireFly, GeneticAlgorithm
from .local_mongo import has_local_mongo
import logging
class SearcherTest(unittest.TestCase):
def test_harmony(self):
"""
Test (pychemia.searcher.harmony) with NonCollinearMagMoms :
"""
logging.basicConfig(level=logging.DEBUG)
if not HAS_PYMONGO:
print('Could not load pymongo, leaving now')
return
else:
if not has_local_mongo():
return
pcm_log.debug('HarmonySearch')
source = 'tests/data/vasp_02'
assert os.path.isfile(source + os.sep + 'INCAR')
assert os.path.isfile(source + os.sep + 'POSCAR')
popu = NonCollinearMagMoms('test', source, debug=True)
popu.pcdb.clean()
searcher = HarmonySearch(popu, generation_size=16, stabilization_limit=5)
searcher.run()
popu.pcdb.clean()
def test_firefly(self):
"""
Test (pychemia.searcher.firefly) with NonCollinearMagMoms :
"""
logging.basicConfig(level=logging.DEBUG)
if not HAS_PYMONGO:
print('Could not load pymongo, leaving now')
return
else:
if not has_local_mongo():
return
        pcm_log.debug('FireFly')
source = 'tests/data/vasp_02'
assert os.path.isfile(source + os.sep + 'INCAR')
assert os.path.isfile(source + os.sep + 'POSCAR')
popu = NonCollinearMagMoms('test', source, debug=True)
popu.pcdb.clean()
searcher = FireFly(popu, generation_size=16, stabilization_limit=5)
searcher.run()
popu.pcdb.clean()
def test_genetic(self):
"""
Test (pychemia.searcher.genetic) with NonCollinearMagMoms :
"""
logging.basicConfig(level=logging.DEBUG)
if not HAS_PYMONGO:
print('Could not load pymongo, leaving now')
return
else:
if not has_local_mongo():
return
        pcm_log.debug('GeneticAlgorithm')
source = 'tests/data/vasp_02'
assert os.path.isfile(source + os.sep + 'INCAR')
assert os.path.isfile(source + os.sep + 'POSCAR')
popu = NonCollinearMagMoms('test', source, debug=True)
popu.pcdb.clean()
searcher = GeneticAlgorithm(popu, generation_size=16, stabilization_limit=5)
searcher.run()
popu.pcdb.clean()
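    # Hedged refactoring sketch (not part of the original file): the three
    # tests above repeat the same setup/run/cleanup cycle and differ only in
    # the searcher class; a helper such as this hypothetical `_run_searcher`
    # would let each test become a one-liner.
    def _run_searcher(self, searcher_class):
        logging.basicConfig(level=logging.DEBUG)
        if not HAS_PYMONGO:
            print('Could not load pymongo, leaving now')
            return
        if not has_local_mongo():
            return
        pcm_log.debug(searcher_class.__name__)
        source = 'tests/data/vasp_02'
        assert os.path.isfile(source + os.sep + 'INCAR')
        assert os.path.isfile(source + os.sep + 'POSCAR')
        popu = NonCollinearMagMoms('test', source, debug=True)
        popu.pcdb.clean()
        searcher = searcher_class(popu, generation_size=16, stabilization_limit=5)
        searcher.run()
        popu.pcdb.clean()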
| 32.25
| 84
| 0.615116
| 282
| 2,580
| 5.524823
| 0.216312
| 0.019255
| 0.030809
| 0.06932
| 0.742619
| 0.742619
| 0.742619
| 0.742619
| 0.742619
| 0.742619
| 0
| 0.008126
| 0.284496
| 2,580
| 79
| 85
| 32.658228
| 0.835861
| 0.071705
| 0
| 0.762712
| 0
| 0
| 0.104516
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 1
| 0.050847
| false
| 0
| 0.118644
| 0
| 0.288136
| 0.050847
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8fdd2b1475d4f388d312ef8c7429a76e05d0ce0a
| 102
|
py
|
Python
|
ex1/lib/evaluation.py
|
tkauf15k/sos2020
|
b75188097d095e4acaca32290ba4f49fa8cb6c0e
|
[
"Apache-2.0"
] | null | null | null |
ex1/lib/evaluation.py
|
tkauf15k/sos2020
|
b75188097d095e4acaca32290ba4f49fa8cb6c0e
|
[
"Apache-2.0"
] | null | null | null |
ex1/lib/evaluation.py
|
tkauf15k/sos2020
|
b75188097d095e4acaca32290ba4f49fa8cb6c0e
|
[
"Apache-2.0"
] | null | null | null |
def opt_gap(fitness, optimum):
return ((1.0 * fitness - optimum) / optimum) * 100 # in percent...
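# Hedged usage example (not part of the original file): a fitness of 230
# against an optimum of 200 gives ((1.0 * 230 - 200) / 200) * 100 = 15.0,
# i.e. the solution is 15 percent away from the optimum.
if __name__ == "__main__":
    assert opt_gap(230, 200) == 15.0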
| 51
| 71
| 0.637255
| 14
| 102
| 4.571429
| 0.785714
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060976
| 0.196078
| 102
| 2
| 71
| 51
| 0.719512
| 0.127451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
8fe8efc76633076b0a62acc875debbc8768fe31d
| 29
|
py
|
Python
|
tests/conftest.py
|
echeu/ironman
|
551baae6e3c8515347c2c47128d77897e7c4c38b
|
[
"MIT"
] | 5
|
2017-11-17T12:30:27.000Z
|
2019-08-15T03:04:17.000Z
|
tests/conftest.py
|
echeu/ironman
|
551baae6e3c8515347c2c47128d77897e7c4c38b
|
[
"MIT"
] | 24
|
2015-11-03T06:54:48.000Z
|
2022-03-03T21:51:21.000Z
|
tests/conftest.py
|
echeu/ironman
|
551baae6e3c8515347c2c47128d77897e7c4c38b
|
[
"MIT"
] | 3
|
2018-12-24T08:39:54.000Z
|
2021-09-29T21:42:01.000Z
|
import pytest
import ironman
| 9.666667
| 14
| 0.862069
| 4
| 29
| 6.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 2
| 15
| 14.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8ffd80274e610bc25b3ff0e502952716b1fea305
| 27
|
py
|
Python
|
wordsalad/input/__init__.py
|
skurmedel/wordsalad
|
5feaf29bf8b9c88624b783cd087a6589ea0ab48a
|
[
"MIT"
] | null | null | null |
wordsalad/input/__init__.py
|
skurmedel/wordsalad
|
5feaf29bf8b9c88624b783cd087a6589ea0ab48a
|
[
"MIT"
] | null | null | null |
wordsalad/input/__init__.py
|
skurmedel/wordsalad
|
5feaf29bf8b9c88624b783cd087a6589ea0ab48a
|
[
"MIT"
] | null | null | null |
from .tokenisation import *
| 27
| 27
| 0.814815
| 3
| 27
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8906eed6547e313039e3b85a0c9e1acfde7be18a
| 22
|
py
|
Python
|
blogApp/utils/__init__.py
|
SSabu/BikeMaps
|
c2c99f586293af6415bf1a5ce3da3d0c7126ee30
|
[
"MIT"
] | 19
|
2015-10-17T10:46:22.000Z
|
2022-03-04T07:29:38.000Z
|
blogApp/utils/__init__.py
|
SSabu/BikeMaps
|
c2c99f586293af6415bf1a5ce3da3d0c7126ee30
|
[
"MIT"
] | 36
|
2017-06-21T03:01:30.000Z
|
2022-03-04T17:26:36.000Z
|
blogApp/utils/__init__.py
|
SSabu/BikeMaps
|
c2c99f586293af6415bf1a5ce3da3d0c7126ee30
|
[
"MIT"
] | 18
|
2015-05-17T03:53:38.000Z
|
2021-04-16T19:00:56.000Z
|
from .hash62 import *
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0.181818
| 22
| 1
| 22
| 22
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
89083206835b81cab058f9471903be9dc77f0c7b
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pkginfo/index.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pkginfo/index.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pkginfo/index.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/4e/b9/72/8db208bf6be069d29e014326a3e9a02380faa33a053981999beb05b137
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.447917
| 0
| 96
| 1
| 96
| 96
| 0.447917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
64ee7cd027c9781057ba8f3ce68765f70f6ccd18
| 62
|
py
|
Python
|
gym_tic_tac_toe/envs/__init__.py
|
MrScabbyCreature/gym-tic_tac_toe
|
99c532a96131c6c3b6f80be36127614363deb176
|
[
"MIT"
] | 22
|
2017-06-11T04:56:28.000Z
|
2022-01-30T17:11:15.000Z
|
gym_tic_tac_toe/envs/__init__.py
|
PaulinaSzy/gym-tic-tac-toe
|
7fedb249e22dbf61506d961fe801c04f4e63f583
|
[
"MIT"
] | 2
|
2017-06-13T09:45:38.000Z
|
2018-08-28T13:51:26.000Z
|
gym_tic_tac_toe/envs/__init__.py
|
PaulinaSzy/gym-tic-tac-toe
|
7fedb249e22dbf61506d961fe801c04f4e63f583
|
[
"MIT"
] | 10
|
2017-06-13T10:02:06.000Z
|
2021-04-10T06:54:59.000Z
|
from gym_tic_tac_toe.envs.tic_tac_toe_env import TicTacToeEnv
| 31
| 61
| 0.903226
| 12
| 62
| 4.166667
| 0.75
| 0.24
| 0.36
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 62
| 1
| 62
| 62
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8f66ecb8e321a5ed76fdbf854ed9a8158abaae8b
| 55
|
py
|
Python
|
learning/utils/__init__.py
|
lil-lab/cerealbar_generation
|
41153537c0bd8aed97f2ea841165477a8c480d58
|
[
"MIT"
] | null | null | null |
learning/utils/__init__.py
|
lil-lab/cerealbar_generation
|
41153537c0bd8aed97f2ea841165477a8c480d58
|
[
"MIT"
] | null | null | null |
learning/utils/__init__.py
|
lil-lab/cerealbar_generation
|
41153537c0bd8aed97f2ea841165477a8c480d58
|
[
"MIT"
] | null | null | null |
from .utils import print_and_log
from .rewards import *
| 27.5
| 32
| 0.818182
| 9
| 55
| 4.777778
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 55
| 2
| 33
| 27.5
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
56eb869d24908448eecf57a2379e6e3dc9a0711e
| 31
|
py
|
Python
|
automon/integrations/slack/__init__.py
|
TheShellLand/automonisaur
|
b5f304a44449b8664c93d8a8a3c3cf2d73aa0ce9
|
[
"MIT"
] | 2
|
2021-09-15T18:35:44.000Z
|
2022-01-18T05:36:54.000Z
|
automon/integrations/slack/__init__.py
|
TheShellLand/automonisaur
|
b5f304a44449b8664c93d8a8a3c3cf2d73aa0ce9
|
[
"MIT"
] | 16
|
2021-08-29T22:51:53.000Z
|
2022-03-09T16:08:19.000Z
|
automon/integrations/slack/__init__.py
|
TheShellLand/automonisaur
|
b5f304a44449b8664c93d8a8a3c3cf2d73aa0ce9
|
[
"MIT"
] | null | null | null |
from .slack_formatting import *
| 31
| 31
| 0.83871
| 4
| 31
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
710b1f3f583de6fd3b4077b61d329e7510ae09d4
| 346
|
py
|
Python
|
bitmovin_api_sdk/encoding/encodings/muxings/fmp4/drm/clearkey/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/encodings/muxings/fmp4/drm/clearkey/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/encodings/muxings/fmp4/drm/clearkey/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.encoding.encodings.muxings.fmp4.drm.clearkey.clearkey_api import ClearkeyApi
from bitmovin_api_sdk.encoding.encodings.muxings.fmp4.drm.clearkey.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.fmp4.drm.clearkey.clear_key_drm_list_query_params import ClearKeyDrmListQueryParams
| 86.5
| 132
| 0.901734
| 47
| 346
| 6.361702
| 0.425532
| 0.120401
| 0.150502
| 0.180602
| 0.571906
| 0.571906
| 0.571906
| 0.571906
| 0.571906
| 0.571906
| 0
| 0.008982
| 0.034682
| 346
| 3
| 133
| 115.333333
| 0.886228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
710c74297a2e218e9f268eb770830a9cde275b0d
| 29
|
py
|
Python
|
twython3k/__init__.py
|
neara/twython
|
34e9474b91af875e14553b781f253b98619ea765
|
[
"MIT"
] | 2
|
2015-11-05T08:53:06.000Z
|
2016-03-01T22:13:56.000Z
|
twython3k/__init__.py
|
mgrouchy/twython
|
1d724328383f4dc16aa957fb0089ac0057bb0d74
|
[
"MIT"
] | null | null | null |
twython3k/__init__.py
|
mgrouchy/twython
|
1d724328383f4dc16aa957fb0089ac0057bb0d74
|
[
"MIT"
] | 1
|
2021-11-01T19:36:08.000Z
|
2021-11-01T19:36:08.000Z
|
from .twython import Twython
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8544c95307448b2252d786639a6b8adfc12131c0
| 15,630
|
py
|
Python
|
manilaclient/tests/unit/osc/v2/test_quotas.py
|
sapcc/python-manilaclient
|
a0985c56a55563ea5ad4e8303a8ac5b8cffa6bde
|
[
"CNRI-Python",
"Apache-1.1"
] | 37
|
2015-01-29T20:10:49.000Z
|
2021-10-01T23:31:23.000Z
|
manilaclient/tests/unit/osc/v2/test_quotas.py
|
sapcc/python-manilaclient
|
a0985c56a55563ea5ad4e8303a8ac5b8cffa6bde
|
[
"CNRI-Python",
"Apache-1.1"
] | 1
|
2017-04-12T13:57:10.000Z
|
2017-04-12T13:57:10.000Z
|
manilaclient/tests/unit/osc/v2/test_quotas.py
|
sapcc/python-manilaclient
|
a0985c56a55563ea5ad4e8303a8ac5b8cffa6bde
|
[
"CNRI-Python",
"Apache-1.1"
] | 21
|
2015-09-23T09:15:14.000Z
|
2022-03-12T16:38:17.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from osc_lib import exceptions
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
from manilaclient import api_versions
from manilaclient.common.apiclient.exceptions import BadRequest
from manilaclient.osc.v2 import quotas as osc_quotas
from manilaclient.tests.unit.osc.v2 import fakes as manila_fakes
class TestQuotas(manila_fakes.TestShare):
def setUp(self):
super(TestQuotas, self).setUp()
self.quotas_mock = self.app.client_manager.share.quotas
self.quotas_mock.reset_mock()
self.app.client_manager.share.api_version = api_versions.APIVersion(
api_versions.MAX_VERSION
)
class TestQuotaSet(TestQuotas):
project = identity_fakes.FakeProject.create_one_project()
user = identity_fakes.FakeUser.create_one_user()
def setUp(self):
super(TestQuotaSet, self).setUp()
self.quotas = manila_fakes.FakeQuotaSet.create_fake_quotas()
self.quotas_mock.update = mock.Mock()
self.quotas_mock.update.return_value = None
self.cmd = osc_quotas.QuotaSet(self.app, None)
def test_quota_set_shares(self):
arglist = [
'--project', self.project.id,
'--shares', '40'
]
verifylist = [
('project', self.project.id),
('shares', 40)
]
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.quotas_mock.update.assert_called_with(
force=None,
gigabytes=None,
share_networks=None,
shares=40,
snapshot_gigabytes=None,
snapshots=None,
per_share_gigabytes=None,
tenant_id=self.project.id,
user_id=None)
self.assertIsNone(result)
def test_quota_set_gigabytes(self):
arglist = [
'--project', self.project.id,
'--gigabytes', '1100'
]
verifylist = [
('project', self.project.id),
('gigabytes', 1100)
]
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.quotas_mock.update.assert_called_with(
force=None,
gigabytes=1100,
share_networks=None,
shares=None,
snapshot_gigabytes=None,
snapshots=None,
per_share_gigabytes=None,
tenant_id=self.project.id,
user_id=None)
self.assertIsNone(result)
def test_quota_set_share_type(self):
arglist = [
'--project', self.project.id,
'--share-type', 'default'
]
verifylist = [
('project', self.project.id),
('share_type', 'default')
]
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.quotas_mock.update.assert_called_with(
force=None,
gigabytes=None,
share_networks=None,
share_type='default',
shares=None,
snapshot_gigabytes=None,
snapshots=None,
per_share_gigabytes=None,
tenant_id=self.project.id,
user_id=None)
self.assertIsNone(result)
def test_quota_set_force(self):
arglist = [
'--project', self.project.id,
'--force',
'--shares', '40'
]
verifylist = [
('project', self.project.id),
('force', True),
('shares', 40)
]
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.quotas_mock.update.assert_called_with(
force=True,
gigabytes=None,
share_networks=None,
shares=40,
snapshot_gigabytes=None,
snapshots=None,
tenant_id=self.project.id,
per_share_gigabytes=None,
user_id=None)
self.assertIsNone(result)
def test_quota_set_api_version_exception(self):
self.app.client_manager.share.api_version = api_versions.APIVersion(
'2.39'
)
arglist = [
'--project', self.project.id,
'--share-groups', '40'
]
verifylist = [
('project', self.project.id),
('share_groups', 40)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
def test_quota_set_update_exception(self):
arglist = [
'--project', self.project.id,
'--share-groups', '40',
'--share-group-snapshots', '40'
]
verifylist = [
('project', self.project.id),
('share_groups', 40),
('share_group_snapshots', 40)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.quotas_mock.update.side_effect = BadRequest()
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
def test_quota_set_nothing_to_set_exception(self):
arglist = [
'--project', self.project.id,
]
verifylist = [
('project', self.project.id)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
def test_quota_set_share_replicas(self):
self.app.client_manager.share.api_version = api_versions.APIVersion(
'2.53'
)
arglist = [
'--project', self.project.id,
'--share-replicas', '2',
]
verifylist = [
('project', self.project.id),
('share_replicas', 2)
]
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.quotas_mock.update.assert_called_with(
force=None,
gigabytes=None,
share_networks=None,
share_replicas=2,
shares=None,
snapshot_gigabytes=None,
snapshots=None,
per_share_gigabytes=None,
tenant_id=self.project.id,
user_id=None)
self.assertIsNone(result)
def test_quota_set_replica_gigabytes_exception(self):
self.app.client_manager.share.api_version = api_versions.APIVersion(
'2.51')
arglist = [
'--project', self.project.id,
'--replica-gigabytes', '10',
]
verifylist = [
('project', self.project.id),
('replica_gigabytes', 10)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
def test_quota_set_per_share_gigabytes(self):
arglist = [
'--project', self.project.id,
'--per-share-gigabytes', '10',
]
verifylist = [
('project', self.project.id),
('per_share_gigabytes', 10)
]
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.quotas_mock.update.assert_called_with(
force=None,
gigabytes=None,
share_networks=None,
shares=None,
snapshot_gigabytes=None,
snapshots=None,
per_share_gigabytes=10,
tenant_id=self.project.id,
user_id=None)
self.assertIsNone(result)
class TestQuotaShow(TestQuotas):
project = identity_fakes.FakeProject.create_one_project()
user = identity_fakes.FakeUser.create_one_user()
def setUp(self):
super(TestQuotaShow, self).setUp()
self.quotas = manila_fakes.FakeQuotaSet.create_fake_quotas()
self.quotas_mock.get.return_value = self.quotas
self.cmd = osc_quotas.QuotaShow(self.app, None)
def test_quota_show(self):
arglist = [
'--project', self.project.id
]
verifylist = [
('project', self.project.id)
]
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.quotas_mock.get.assert_called_with(
detail=False,
tenant_id=self.project.id,
user_id=None
)
self.assertCountEqual(columns, self.quotas.keys())
self.assertCountEqual(data, self.quotas._info.values())
def test_quota_show_api_version_exception(self):
self.app.client_manager.share.api_version = api_versions.APIVersion(
'2.38'
)
arglist = [
'--project', self.project.id,
'--share-type', 'default'
]
verifylist = [
('project', self.project.id),
('share_type', 'default')
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
def test_quota_show_user_id_share_type_exception(self):
arglist = [
'--project', self.project.id,
'--share-type', 'default',
'--user', self.user.id
]
verifylist = [
('project', self.project.id),
('share_type', 'default'),
('user', self.user.id)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
def test_quota_show_defaults(self):
arglist = [
'--project', self.project.id,
'--defaults'
]
verifylist = [
('project', self.project.id),
('defaults', True)
]
self.quotas_mock.defaults = mock.Mock()
self.quotas_mock.defaults.return_value = self.quotas
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.quotas_mock.defaults.assert_called_with(self.project.id)
self.assertCountEqual(columns, self.quotas.keys())
self.assertCountEqual(data, self.quotas._info.values())
class TestQuotaDelete(TestQuotas):
project = identity_fakes.FakeProject.create_one_project()
user = identity_fakes.FakeUser.create_one_user()
def setUp(self):
super(TestQuotaDelete, self).setUp()
self.quotas = manila_fakes.FakeQuotaSet.create_fake_quotas()
self.quotas_mock.delete.return_value = None
self.cmd = osc_quotas.QuotaDelete(self.app, None)
def test_quota_delete(self):
arglist = [
'--project', self.project.id
]
verifylist = [
('project', self.project.id)
]
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.quotas_mock.delete.assert_called_with(
tenant_id=self.project.id,
user_id=None)
self.assertIsNone(result)
def test_quota_delete_share_type(self):
arglist = [
'--project', self.project.id,
'--share-type', 'default'
]
verifylist = [
('project', self.project.id),
('share_type', 'default')
]
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.quotas_mock.delete.assert_called_with(
share_type='default',
tenant_id=self.project.id,
user_id=None)
self.assertIsNone(result)
def test_quota_delete_api_version_exception(self):
self.app.client_manager.share.api_version = api_versions.APIVersion(
'2.38'
)
arglist = [
'--project', self.project.id,
'--share-type', 'default'
]
verifylist = [
('project', self.project.id),
('share_type', 'default')
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
    def test_quota_delete_user_share_type_exception(self):
arglist = [
'--project', self.project.id,
'--share-type', 'default',
'--user', self.user.id
]
verifylist = [
('project', self.project.id),
('share_type', 'default'),
('user', self.user.id)
]
with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
mock_find_resource.return_value = self.project
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
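    # Hedged refactoring sketch (not part of the original file): most tests
    # above repeat the same patch/parse/take_action sequence; a helper such
    # as this hypothetical `_take_action_as_project`, defined on each class
    # that has `self.project` and `self.cmd`, would factor it out.
    def _take_action_as_project(self, arglist, verifylist):
        with mock.patch('osc_lib.utils.find_resource') as mock_find_resource:
            mock_find_resource.return_value = self.project
            parsed_args = self.check_parser(self.cmd, arglist, verifylist)
            return self.cmd.take_action(parsed_args)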
| 33.044397
| 77
| 0.585477
| 1,662
| 15,630
| 5.272563
| 0.104693
| 0.071551
| 0.068241
| 0.082164
| 0.843661
| 0.829739
| 0.79596
| 0.753623
| 0.737076
| 0.727491
| 0
| 0.006605
| 0.31222
| 15,630
| 472
| 78
| 33.114407
| 0.808558
| 0.034421
| 0
| 0.687166
| 0
| 0
| 0.07654
| 0.02401
| 0
| 0
| 0
| 0
| 0.080214
| 1
| 0.058824
| false
| 0
| 0.018717
| 0
| 0.104278
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
854bf32905bffe09ba53c80613cf46e570588fc1
| 146
|
py
|
Python
|
app/api/__init__.py
|
MingwangLin/image_super_resolution
|
861af8c5208a570ac63ce5f96be1491f7ee8bb60
|
[
"Apache-2.0"
] | null | null | null |
app/api/__init__.py
|
MingwangLin/image_super_resolution
|
861af8c5208a570ac63ce5f96be1491f7ee8bb60
|
[
"Apache-2.0"
] | null | null | null |
app/api/__init__.py
|
MingwangLin/image_super_resolution
|
861af8c5208a570ac63ce5f96be1491f7ee8bb60
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint
api = Blueprint('api', __name__)
from . import login, user, decorator, notification, tweet, repost, comment, follow
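# Note (added): importing the view modules only after the Blueprint exists is
# a common Flask pattern; it lets any `from . import api` inside those modules
# resolve without a circular import, while still attaching their routes here.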
| 24.333333
| 82
| 0.760274
| 18
| 146
| 5.944444
| 0.777778
| 0.224299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143836
| 146
| 5
| 83
| 29.2
| 0.856
| 0
| 0
| 0
| 0
| 0
| 0.020548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
859c4c891d484245aa8987e8b08988b44d0afc38
| 127
|
py
|
Python
|
vnpy/app/cta_strategy/__init__.py
|
Billy-Meng/vnpy_origin
|
b0b0868027d70b1ba5dac65aa1a6d5e4246a0900
|
[
"MIT"
] | 1
|
2020-06-18T16:38:29.000Z
|
2020-06-18T16:38:29.000Z
|
vnpy/app/cta_strategy/__init__.py
|
Billy-Meng/vnpy_origin
|
b0b0868027d70b1ba5dac65aa1a6d5e4246a0900
|
[
"MIT"
] | 2
|
2020-06-22T12:12:43.000Z
|
2020-06-23T01:26:10.000Z
|
vnpy/app/cta_strategy/__init__.py
|
Billy-Meng/vnpy
|
b0b0868027d70b1ba5dac65aa1a6d5e4246a0900
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
from pathlib import Path
import sys
import vnpy_ctastrategy
sys.modules[__name__] = vnpy_ctastrategy
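# Note (added): assigning to sys.modules[__name__] aliases this package to the
# external vnpy_ctastrategy module, so later imports of vnpy.app.cta_strategy
# transparently resolve to vnpy_ctastrategy.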
| 14.111111
| 40
| 0.755906
| 17
| 127
| 5.294118
| 0.705882
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009174
| 0.141732
| 127
| 8
| 41
| 15.875
| 0.816514
| 0.15748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a431fc90fb2d54f229eb246274241ee1adee896c
| 138
|
py
|
Python
|
tests/smscalls.py
|
thetomcraig/HPI
|
5eecd8721dc0cbfc68040106bb7b540b1567dff3
|
[
"MIT"
] | null | null | null |
tests/smscalls.py
|
thetomcraig/HPI
|
5eecd8721dc0cbfc68040106bb7b540b1567dff3
|
[
"MIT"
] | null | null | null |
tests/smscalls.py
|
thetomcraig/HPI
|
5eecd8721dc0cbfc68040106bb7b540b1567dff3
|
[
"MIT"
] | null | null | null |
from my.smscalls import calls
# TODO: that's a pretty dumb test; perhaps it can be made generic.
def test():
assert len(list(calls())) > 10
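# A marginally more generic variant of the check above (a sketch toward the
# TODO; it still only asserts that parsing yields some well-formed items):
def test_calls_nonempty():
    items = list(calls())
    assert len(items) > 0
    assert all(item is not None for item in items)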
| 19.714286
| 58
| 0.688406
| 23
| 138
| 4.130435
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018018
| 0.195652
| 138
| 6
| 59
| 23
| 0.837838
| 0.405797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a43dac2be61d13a45c8534ccbb4d9351535bce3d
| 12,868
|
py
|
Python
|
tests/test_pp_tesseract_dcr.py
|
KonnexionsGmbH/ocr_bench
|
8f54b386c22b43a2f4e8f98dc6f7ac69edf7e0be
|
[
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
tests/test_pp_tesseract_dcr.py
|
KonnexionsGmbH/ocr_bench
|
8f54b386c22b43a2f4e8f98dc6f7ac69edf7e0be
|
[
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
tests/test_pp_tesseract_dcr.py
|
KonnexionsGmbH/ocr_bench
|
8f54b386c22b43a2f4e8f98dc6f7ac69edf7e0be
|
[
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# pylint: disable=unused-argument
"""Testing Module pp.tesseract_dcr."""
import typing
import cfg.glob
import pytest
import dcr
# -----------------------------------------------------------------------------
# Constants & Globals.
# -----------------------------------------------------------------------------
# pylint: disable=W0212
# @pytest.mark.issue
# -----------------------------------------------------------------------------
# Test RUN_ACTION_IMAGE_2_PDF - normal.
# -----------------------------------------------------------------------------
def test_run_action_image_2_pdf_normal(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_IMAGE_2_PDF - normal."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("pdf_scanned_ok", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "true"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PDF_2_IMAGE])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_image_2_pdf_normal <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
files_expected: typing.List = [
"pdf_scanned_ok_1.pdf",
"pdf_scanned_ok_1_1.pdf",
]
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
files_expected,
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_IMAGE_2_PDF - normal - keep.
# -----------------------------------------------------------------------------
def test_run_action_image_2_pdf_normal_keep(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_IMAGE_2_PDF - normal - keep."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("pdf_scanned_01_ok_16_c", "bmp"),
("pdf_scanned_01_ok_24", "bmp"),
("pdf_scanned_01_ok_256_c", "bmp"),
("pdf_scanned_01_ok_m", "bmp"),
("pdf_scanned_02_ok", "gif"),
# TBD next Tesseract OCR version
# ("pdf_scanned_03_ok", "jp2"),
("pdf_scanned_04_ok", "jpeg"),
("pdf_scanned_05_ok", "png"),
("pdf_scanned_06_ok", "pnm"),
("pdf_scanned_07_ok", "tiff"),
("pdf_scanned_08_ok", "webp"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
(cfg.glob.setup._DCR_CFG_TESSERACT_TIMEOUT, "30"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_image_2_pdf_normal <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
files_expected: typing.List = [
"pdf_scanned_01_ok_16_c_1.bmp",
"pdf_scanned_01_ok_16_c_1.pdf",
"pdf_scanned_01_ok_24_3.bmp",
"pdf_scanned_01_ok_24_3.pdf",
"pdf_scanned_01_ok_256_c_5.bmp",
"pdf_scanned_01_ok_256_c_5.pdf",
"pdf_scanned_01_ok_m_7.bmp",
"pdf_scanned_01_ok_m_7.pdf",
"pdf_scanned_02_ok_9.gif",
"pdf_scanned_02_ok_9.pdf",
# TBD next Tesseract OCR version
# "pdf_scanned_03_ok_11.jp2",
# "pdf_scanned_03_ok_11.pdf",
"pdf_scanned_04_ok_11.jpeg",
"pdf_scanned_04_ok_11.pdf",
"pdf_scanned_05_ok_13.png",
"pdf_scanned_05_ok_13.pdf",
"pdf_scanned_06_ok_15.pnm",
"pdf_scanned_06_ok_15.pdf",
"pdf_scanned_07_ok_17.tiff",
"pdf_scanned_07_ok_17.pdf",
"pdf_scanned_08_ok_19.webp",
"pdf_scanned_08_ok_19.pdf",
]
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
files_expected,
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_IMAGE_2_PDF - normal - duplicate.
# -----------------------------------------------------------------------------
def test_run_action_image_2_pdf_normal_duplicate(fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_IMAGE_2_PDF - normal - duplicate."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_image_2_pdf_normal_duplicate <=========")
stem_name_1: str = "tiff_pdf_text_ok"
file_ext_1: str = "tiff"
pytest.helpers.copy_files_4_pytest_2_dir([(stem_name_1, file_ext_1)], cfg.glob.setup.directory_inbox)
stem_name_2: str = "tiff_pdf_text_ok_1"
file_ext_2: str = "pdf"
pytest.helpers.help_run_action_all_complete_duplicate_file(file_ext_1, file_ext_2, stem_name_1, stem_name_2)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_IMAGE_2_PDF - normal - timeout.
# -----------------------------------------------------------------------------
def test_run_action_image_2_pdf_normal_timeout(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_IMAGE_2_PDF - normal - timeout."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_image_2_pdf_normal_timeout 1/2 <=========")
stem_name: str = "pdf_scanned_ok"
file_ext: str = "pdf"
document_id, _file_tesseract_1 = pytest.helpers.help_run_action_process_inbox_normal(
stem_name,
file_ext,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
(cfg.glob.setup._DCR_CFG_TESSERACT_TIMEOUT, "1"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PDF_2_IMAGE])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_image_2_pdf_normal_timeout 2/2 <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
[
stem_name + "_" + str(document_id) + "." + file_ext,
stem_name + "_" + str(document_id) + "_1." + cfg.glob.setup.pdf2image_type,
],
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_IMAGE_2_PDF - reunite.
# -----------------------------------------------------------------------------
def test_run_action_image_2_pdf_reunite(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_IMAGE_2_PDF - reunite."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
pytest.helpers.copy_files_4_pytest_2_dir(
[
("translating_sql_into_relational_algebra_p01_02", "pdf"),
],
cfg.glob.setup.directory_inbox,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "true"),
(cfg.glob.setup._DCR_CFG_TESSERACT_TIMEOUT, "30"),
],
)
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PDF_2_IMAGE])
dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_image_2_pdf_reunite <=========")
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox,
[],
[],
)
files_expected: typing.List = [
"translating_sql_into_relational_algebra_p01_02_1.pdf",
"translating_sql_into_relational_algebra_p01_02_1_0.pdf",
]
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_accepted,
[],
files_expected,
)
pytest.helpers.verify_content_of_directory(
cfg.glob.setup.directory_inbox_rejected,
[],
[],
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test RUN_ACTION_IMAGE_2_PDF - reunite - duplicate.
# -----------------------------------------------------------------------------
def test_run_action_image_2_pdf_reunite_duplicate(fxtr_setup_empty_db_and_inbox):
"""Test RUN_ACTION_IMAGE_2_PDF - reunite - duplicate."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
cfg.glob.logger.info("=========> test_run_action_image_2_pdf_normal_duplicate <=========")
stem_name_1: str = "translating_sql_into_relational_algebra_p01_02"
file_ext_1: str = "pdf"
pytest.helpers.copy_files_4_pytest_2_dir([(stem_name_1, file_ext_1)], cfg.glob.setup.directory_inbox)
stem_name_2: str = "translating_sql_into_relational_algebra_p01_02_1_0"
file_ext_2: str = "pdf"
values_original = pytest.helpers.backup_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
[
(cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "true"),
(cfg.glob.setup._DCR_CFG_TESSERACT_TIMEOUT, "30"),
(cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
(cfg.glob.setup._DCR_CFG_TETML_WORD, "true"),
],
)
pytest.helpers.help_run_action_all_complete_duplicate_file(file_ext_1, file_ext_2, stem_name_1, stem_name_2)
pytest.helpers.restore_config_params(
cfg.glob.setup._DCR_CFG_SECTION,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
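# The backup/restore pairs above recur in every test; a context-manager sketch
# (an assumption, not an existing pytest.helpers member) would keep them
# balanced even when a test body raises:
import contextlib
@contextlib.contextmanager
def config_params(section, params):
    saved = pytest.helpers.backup_config_params(section, params)
    try:
        yield
    finally:
        pytest.helpers.restore_config_params(section, saved)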
| 34.967391
| 112
| 0.527588
| 1,412
| 12,868
| 4.302408
| 0.093484
| 0.10716
| 0.077037
| 0.071605
| 0.918189
| 0.860905
| 0.83358
| 0.806749
| 0.750617
| 0.723621
| 0
| 0.022909
| 0.175707
| 12,868
| 367
| 113
| 35.06267
| 0.549826
| 0.263366
| 0
| 0.559829
| 0
| 0
| 0.167962
| 0.116551
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.017094
| 0
| 0.042735
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a482a6f89635d6cebfa1c08c5e14f59a6956ea6b
| 18,310
|
py
|
Python
|
vae.py
|
rshen36/vae
|
e0bdaf1791c0e0dbe4ba203244ec9027f3589b81
|
[
"MIT"
] | 1
|
2018-09-03T06:53:34.000Z
|
2018-09-03T06:53:34.000Z
|
vae.py
|
rshen36/vae
|
e0bdaf1791c0e0dbe4ba203244ec9027f3589b81
|
[
"MIT"
] | null | null | null |
vae.py
|
rshen36/vae
|
e0bdaf1791c0e0dbe4ba203244ec9027f3589b81
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import tensorflow.contrib.distributions as dbns
# parent class for all VAE variants
class AbstVAE:
def __init__(self, seed, model_scope):
self.seed = seed
self.model_scope = model_scope
np.random.seed(self.seed)
def encoder(self, x, reuse, trainable):
raise NotImplementedError
def decoder(self, z, reuse, trainable):
raise NotImplementedError
def _build_model(self):
raise NotImplementedError
class BernoulliVAE(AbstVAE):
def __init__(self, x_dims, z_dim=100, lr=.02, seed=123, hidden_dim=500, model_name="vae"):
super().__init__(seed=seed, model_scope=model_name)
self.x_dims = x_dims
self.z_dim = z_dim
self.hidden_dim = hidden_dim
self.lr = lr
with tf.variable_scope(self.model_scope):
self._build_model()
def encoder(self, x, scope="encoder", reuse=False, trainable=True):
with tf.variable_scope(scope, reuse=reuse):
# for now, hardcoding model architecture as that specified in paper
enet = layers.fully_connected(x, num_outputs=self.hidden_dim, activation_fn=tf.nn.tanh,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=tf.truncated_normal_initializer(stddev=0.01),
trainable=trainable)
z_params = layers.fully_connected(enet, num_outputs=self.z_dim * 2, activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=tf.truncated_normal_initializer(stddev=0.01),
trainable=trainable)
return z_params
def decoder(self, z, scope="decoder", reuse=False, trainable=True):
with tf.variable_scope(scope, reuse=reuse):
# for now, hardcoding model architecture as that specified in paper
dnet = layers.fully_connected(z, num_outputs=self.hidden_dim, activation_fn=tf.nn.tanh,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=tf.truncated_normal_initializer(stddev=0.01))
x_hat = layers.fully_connected(dnet, num_outputs=int(np.prod(self.x_dims)), activation_fn=tf.nn.sigmoid,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=tf.truncated_normal_initializer(stddev=0.01),
trainable=trainable) # Bernoulli MLP decoder
return x_hat
def _build_model(self):
# input points
self.x = tf.placeholder(tf.float32, shape=[None, int(np.prod(self.x_dims))], name="X")
self.noise = tf.placeholder(tf.float32, shape=[None, self.z_dim], name="noise")
self.p_z = dbns.Normal(loc=tf.zeros_like(self.noise), scale=tf.ones_like(self.noise))
# encoder
z_params = self.encoder(self.x)
z_mu = z_params[:, self.z_dim:]
z_sigma = tf.exp(z_params[:, :self.z_dim])
self.q_z = dbns.Normal(loc=z_mu, scale=z_sigma)
# reparameterization trick
z = z_mu + tf.multiply(z_sigma, self.p_z.sample())
# z = self.q_z.sample()
# decoder
self.x_hat = self.decoder(z)
self.p_x_z = dbns.Bernoulli(logits=self.x_hat)
nll_loss = -tf.reduce_sum(self.x * tf.log(1e-8 + self.x_hat) +
(1 - self.x) * tf.log(1e-8 + 1 - self.x_hat), 1) # Bernoulli nll
kl_loss = 0.5 * tf.reduce_sum(tf.square(z_mu) + tf.square(z_sigma) - tf.log(1e-8 + tf.square(z_sigma)) - 1, 1)
# kl_loss = tf.reduce_sum(dbns.kl_divergence(self.q_z, self.p_z), 1)
self.loss = tf.reduce_mean(nll_loss + kl_loss)
self.elbo = -1.0 * tf.reduce_mean(nll_loss + kl_loss)
# in original paper, lr chosen from {0.01, 0.02, 0.1} depending on first few iters training performance
optimizer = tf.train.AdagradOptimizer(learning_rate=self.lr)
self.train_op = optimizer.minimize(self.loss)
# for sampling
self.z = self.encoder(self.x, trainable=False, reuse=True)
self.z_pl = tf.placeholder(tf.float32, shape=[None, self.z_dim])
self.sample = self.decoder(self.z_pl, trainable=False, reuse=True)
# tensorboard summaries
x_img = tf.reshape(self.x, [-1] + self.x_dims)
tf.summary.image('data', x_img)
xhat_img = tf.reshape(self.x_hat, [-1] + self.x_dims)
tf.summary.image('reconstruction', xhat_img)
tf.summary.scalar('reconstruction_loss', tf.reduce_mean(nll_loss))
tf.summary.scalar('kl_loss', tf.reduce_mean(kl_loss))
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('elbo', self.elbo)
self.merged = tf.summary.merge_all()
class GaussianVAE(AbstVAE):
def __init__(self, x_dims, z_dim=100, hidden_dim=500, lr=.02, seed=123, model_name="vae"):
super().__init__(seed=seed, model_scope=model_name)
self.x_dims = x_dims
self.z_dim = z_dim
self.hidden_dim = hidden_dim
self.lr = lr
with tf.variable_scope(self.model_scope):
self._build_model()
def encoder(self, x, scope="encoder", reuse=False, trainable=True):
with tf.variable_scope(scope, reuse=reuse):
# for now, hardcoding model architecture as that specified in paper
enet = layers.fully_connected(x, num_outputs=self.hidden_dim, activation_fn=tf.nn.tanh,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=tf.truncated_normal_initializer(stddev=0.01),
trainable=trainable)
z_params = layers.fully_connected(enet, num_outputs=self.z_dim * 2, activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=tf.truncated_normal_initializer(stddev=0.01),
trainable=trainable)
return z_params
def decoder(self, z, scope="decoder", reuse=False, trainable=True):
with tf.variable_scope("decoder", reuse=reuse):
# for now, hardcoding model architecture as that specified in paper
dnet = layers.fully_connected(z, num_outputs=self.hidden_dim, activation_fn=tf.nn.tanh,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=tf.truncated_normal_initializer(stddev=0.01),
trainable=trainable)
out_params = layers.fully_connected(dnet, num_outputs=int(np.prod(self.x_dims)*2), activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
biases_initializer=tf.truncated_normal_initializer(stddev=0.01),
trainable=trainable)
return out_params
def _build_model(self):
# input points
self.x = tf.placeholder(tf.float32, shape=[None, int(np.prod(self.x_dims))], name="X")
self.noise = tf.placeholder(tf.float32, shape=[None, self.z_dim], name="noise")
self.p_z = dbns.Normal(loc=tf.zeros_like(self.noise), scale=tf.ones_like(self.noise))
# encoder
z_params = self.encoder(self.x)
z_mu = z_params[:, self.z_dim:]
z_sigma = tf.exp(z_params[:, :self.z_dim])
self.q_z = dbns.Normal(loc=z_mu, scale=z_sigma)
# reparameterization trick
z = z_mu + tf.multiply(z_sigma, self.p_z.sample())
# z = self.q_z.sample()
# decoder
out_params = self.decoder(z)
mu = tf.nn.sigmoid(out_params[:, int(np.prod(self.x_dims)):]) # out_mu constrained to (0,1)
sigma = tf.exp(out_params[:, :int(np.prod(self.x_dims))])
self.x_hat = mu
self.p_x_z = dbns.Normal(loc=mu, scale=sigma)
nll_loss = -tf.reduce_sum(self.p_x_z.log_prob(self.x), 1)
kl_loss = 0.5 * tf.reduce_sum(tf.square(z_mu) + tf.square(z_sigma) - tf.log(1e-8 + tf.square(z_sigma)) - 1, 1)
# kl_loss = tf.reduce_sum(dbns.kl_divergence(self.q_z, self.p_z), 1)
self.loss = tf.reduce_mean(nll_loss + kl_loss)
self.elbo = -1.0 * tf.reduce_mean(nll_loss + kl_loss)
# in original paper, lr chosen from {0.01, 0.02, 0.1} depending on first few iters training performance
optimizer = tf.train.AdagradOptimizer(learning_rate=self.lr)
self.train_op = optimizer.minimize(self.loss)
# for sampling
self.z = self.encoder(self.x, trainable=False, reuse=True)
self.z_pl = tf.placeholder(tf.float32, shape=[None, self.z_dim])
self.sample = self.decoder(self.z_pl, trainable=False, reuse=True)
# tensorboard summaries
x_img = tf.reshape(self.x, [-1] + self.x_dims)
tf.summary.image('data', x_img)
xhat_img = tf.reshape(self.x_hat, [-1] + self.x_dims)
tf.summary.image('reconstruction', xhat_img)
tf.summary.scalar('reconstruction_loss', tf.reduce_mean(nll_loss))
tf.summary.scalar('kl_loss', tf.reduce_mean(kl_loss))
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('elbo', self.elbo)
self.merged = tf.summary.merge_all()
class BernoulliIWAE(AbstVAE):
def __init__(self, x_dims, z_dim=50, hidden_dim=200, seed=123, batch_size=20, n_samples=5, model_name="iwae"):
super().__init__(seed=seed, model_scope=model_name)
self.x_dims = x_dims
self.z_dim = z_dim
self.hidden_dim = hidden_dim
self.batch_size = batch_size
self.n_samples = n_samples
with tf.variable_scope(self.model_scope):
self._build_model()
def encoder(self, x, scope="encoder", hidden_dim=200, z_dim=50, reuse=False, trainable=True):
with tf.variable_scope(scope, reuse=reuse):
# Initialization via heuristic specified by Glorot & Bengio 2010
enet = layers.fully_connected(x, num_outputs=hidden_dim, activation_fn=tf.nn.tanh,
weights_initializer=layers.xavier_initializer(),
biases_initializer=layers.xavier_initializer(),
trainable=trainable)
enet = layers.fully_connected(enet, num_outputs=hidden_dim, activation_fn=tf.nn.tanh,
weights_initializer=layers.xavier_initializer(),
biases_initializer=layers.xavier_initializer(),
trainable=trainable)
z_params = layers.fully_connected(enet, num_outputs=z_dim * 2, activation_fn=None,
weights_initializer=layers.xavier_initializer(),
biases_initializer=layers.xavier_initializer(),
trainable=trainable)
return z_params
def decoder(self, z, scope="decoder", hidden_dim=200, reuse=False, trainable=True):
with tf.variable_scope(scope, reuse=reuse):
# Initialization via heuristic specified by Glorot & Bengio 2010
dnet = layers.fully_connected(z, num_outputs=hidden_dim, activation_fn=tf.nn.tanh,
weights_initializer=layers.xavier_initializer(),
biases_initializer=layers.xavier_initializer(),
trainable=trainable)
dnet = layers.fully_connected(dnet, num_outputs=hidden_dim, activation_fn=tf.nn.tanh,
weights_initializer=layers.xavier_initializer(),
biases_initializer=layers.xavier_initializer(),
trainable=trainable)
x_hat = layers.fully_connected(dnet, num_outputs=int(np.prod(self.x_dims)), activation_fn=tf.nn.sigmoid,
weights_initializer=layers.xavier_initializer(),
biases_initializer=layers.xavier_initializer(),
trainable=trainable) # Bernoulli MLP decoder
return x_hat
def _build_model(self):
# input points
self.x = tf.placeholder(tf.float32, shape=[self.batch_size, int(np.prod(self.x_dims))], name="X")
x = tf.tile(self.x, multiples=[self.n_samples, 1])
self.lr = tf.placeholder(tf.float32, shape=(), name="lr")
self.p_z = dbns.Normal(loc=tf.zeros(shape=[self.batch_size * self.n_samples, self.z_dim]),
scale=tf.ones(shape=[self.batch_size * self.n_samples, self.z_dim]))
# self.p_h1 = dbns.Normal(loc=tf.zeros(shape=[self.batch_size * self.n_samples, 100]),
# scale=tf.ones(shape=[self.batch_size * self.n_samples, 100]))
# self.p_h2 = dbns.Normal(loc=tf.zeros(shape=[self.batch_size * self.n_samples, 50]),
# scale=tf.ones(shape=[self.batch_size * self.n_samples, 50]))
# self.p_h1_ = dbns.Normal(loc=tf.zeros(shape=[self.batch_size * self.n_samples, 100]),
# scale=tf.ones(shape=[self.batch_size * self.n_samples, 100]))
# encoder
z_params = self.encoder(x)
z_mu = z_params[:, self.z_dim:]
z_sigma = tf.exp(z_params[:, :self.z_dim])
self.q_z = dbns.Normal(loc=z_mu, scale=z_sigma)
# params_q_h1_x = self.encoder(x, scope="q_h1_x", hidden_dim=200, z_dim=100)
# h1_mu = params_q_h1_x[:, 100:]
# h1_sigma = tf.exp(params_q_h1_x[:, :100])
# self.q_h1_x = dbns.Normal(loc=h1_mu, scale=h1_sigma)
# h1 = h1_mu + tf.multiply(h1_sigma, self.p_h1.sample())
# params_q_h2_h1 = self.encoder(h1, scope="q_h2_h1", hidden_dim=100, z_dim=50)
# h2_mu = params_q_h2_h1[:, 50:]
# h2_sigma = tf.exp(params_q_h2_h1[:, :50])
# self.q_h2_h1 = dbns.Normal(loc=h2_mu, scale=h2_sigma)
# h2 = h2_mu + tf.multiply(h2_sigma, self.p_h2.sample())
z = z_mu + tf.multiply(z_sigma, self.p_z.sample())
# params_p_h1_h2 = self.encoder(h2, scope="p_h1_h2", hidden_dim=100, z_dim=100)
# h1_mu_ = params_p_h1_h2[:, 100:]
# h1_sigma_ = tf.exp(params_p_h1_h2[:, :100])
# self.p_h1_h2 = dbns.Normal(loc=h1_mu_, scale=h1_sigma_)
# h1_ = h1_mu_ + tf.multiply(h1_sigma_, self.p_h1_.sample())
# x_hat = self.decoder(h1_, hidden_dim=200)
# x_hat = self.decoder(h1, hidden_dim=200)
x_hat = self.decoder(z)
self.out_dbn = dbns.Bernoulli(logits=x_hat)
log_lik = tf.reduce_sum(x * tf.log(1e-8 + x_hat) + (1 - x) * tf.log(1e-8 + 1 - x_hat), 1)
neg_kld = tf.reduce_sum(self.p_z.log_prob(z) - self.q_z.log_prob(z), 1)
# log_lik = (tf.reduce_sum(x * tf.log(1e-8 + x_hat) + (1 - x) * tf.log(1e-8 + 1 - x_hat), 1) +
# tf.reduce_sum(self.p_h1_h2.log_prob(h1), 1))
# neg_kld = (tf.reduce_sum(self.p_h1_h2.log_prob(h1_) - self.q_h1_x.log_prob(h1), 1) +
# tf.reduce_sum(self.p_h1.log_prob(h1) - self.q_h1_x.log_prob(h1), 1) +
# tf.reduce_sum(self.p_h2.log_prob(h2) - self.q_h2_h1.log_prob(h2), 1))
# log_lik = (tf.reduce_sum(x * tf.log(1e-8 + x_hat) + (1 - x) * tf.log(1e-8 + 1 - x_hat), 1) +
# tf.reduce_sum(self.p_h1_h2.log_prob(h1), 1) + tf.reduce_sum(self.p_h2.log_prob(h2), 1))
# neg_kld = tf.reduce_sum(self.q_h1_x.log_prob(h1), 1) + tf.reduce_sum(self.q_h2_h1.log_prob(h2), 1)
# calculate importance weights using logsumexp and exp-normalize tricks
log_iws = (tf.reshape(log_lik, [self.batch_size, self.n_samples]) -
tf.reshape(neg_kld, [self.batch_size, self.n_samples]))
max_log_iws = tf.reduce_max(log_iws, axis=1, keepdims=True)
log_iws -= max_log_iws
self.elbo = tf.reduce_mean(max_log_iws + tf.log(1e-8 + tf.reduce_mean(
tf.exp(log_iws), axis=1, keepdims=True)))
self.loss = -self.elbo
# compute gradients
log_norm_const = tf.log(tf.clip_by_value(tf.reduce_sum(tf.exp(log_iws), 1, keepdims=True), 1e-9, np.inf))
log_norm_iws = tf.reshape(log_iws - log_norm_const, shape=[-1])
norm_iws = tf.stop_gradient(tf.exp(log_norm_iws))
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
grads = tf.gradients(-tf.reshape(log_iws, [-1]) * norm_iws, trainable_vars)
grads_and_vars = zip(grads, trainable_vars)
# for now, hardcoding the Adam optimizer parameters used in the paper
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.9, beta2=0.999, epsilon=0.0001)
# train on the hand-computed importance-weighted gradients rather than
# letting the optimizer re-derive gradients from self.loss
self.train_op = optimizer.apply_gradients(grads_and_vars)
# for sampling
self.z = self.encoder(self.x, trainable=False, reuse=True)
self.z_pl = tf.placeholder(tf.float32, shape=[None, self.z_dim])
self.sample = self.decoder(self.z_pl, trainable=False, reuse=True)
# tensorboard summaries
x_img = tf.reshape(x, [-1] + self.x_dims)
tf.summary.image('data', x_img)
sample_img = tf.reshape(x_hat, [-1] + self.x_dims)
tf.summary.image('samples', sample_img)
tf.summary.scalar('log_lik', tf.reduce_mean(log_lik))
tf.summary.scalar('neg_kld', tf.reduce_mean(neg_kld))
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('elbo', self.elbo)
self.merged = tf.summary.merge_all()
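# A minimal TF1-style training sketch for BernoulliVAE (assumptions: inputs are
# flattened 28x28x1 images and `next_batch` is a hypothetical data helper
# returning a (128, 784) float array):
model = BernoulliVAE(x_dims=[28, 28, 1])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        batch = next_batch(128)  # hypothetical helper, not defined here
        eps = np.random.randn(128, model.z_dim).astype(np.float32)
        _, loss = sess.run([model.train_op, model.loss],
                           feed_dict={model.x: batch, model.noise: eps})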
| 54.984985
| 118
| 0.602567
| 2,495
| 18,310
| 4.17475
| 0.087375
| 0.021601
| 0.017281
| 0.043011
| 0.831125
| 0.802323
| 0.771505
| 0.764785
| 0.745968
| 0.737519
| 0
| 0.027279
| 0.279246
| 18,310
| 332
| 119
| 55.150602
| 0.761991
| 0.182906
| 0
| 0.683258
| 0
| 0
| 0.01417
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072398
| false
| 0
| 0.0181
| 0
| 0.135747
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a49be588b0168f451fe0900c5e22f307cf11b217
| 5,695
|
py
|
Python
|
tf_siren/siren_mlp.py
|
jackietung1219/tf_SIREN
|
9ed4d66f027744e061cc45585557837abbae6e9c
|
[
"MIT"
] | 134
|
2020-06-19T03:22:13.000Z
|
2022-03-23T17:41:28.000Z
|
tf_siren/siren_mlp.py
|
jackietung1219/tf_SIREN
|
9ed4d66f027744e061cc45585557837abbae6e9c
|
[
"MIT"
] | 17
|
2020-06-22T08:15:11.000Z
|
2021-03-30T09:05:06.000Z
|
tf_siren/siren_mlp.py
|
jackietung1219/tf_SIREN
|
9ed4d66f027744e061cc45585557837abbae6e9c
|
[
"MIT"
] | 29
|
2020-06-19T13:15:20.000Z
|
2022-03-10T06:01:42.000Z
|
import tensorflow as tf
from tf_siren import siren
class SIRENModel(tf.keras.Model):
def __init__(self, units: int, final_units: int,
final_activation: str = "linear",
num_layers: int = 1,
w0: float = 30.0,
w0_initial: float = 30.0,
initial_layer_init: str = 'siren_first_uniform',
use_bias: bool = True, **kwargs):
"""
SIREN model from the paper [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661).
Used to create a multi-layer MLP using SinusodialRepresentationDense layers.
Args:
units: Number of hidden units in the intermediate layers.
final_units: Number of hidden units in the final layer.
final_activation: Activation function of the final layer.
num_layers: Number of layers in the network.
w0: w0 in the activation step `act(x; w0) = sin(w0 * x)`.
w0_initial: By default, scales `w0` of first layer to 30 (as used in the paper).
initial_layer_init: Initialization for the first SIREN layer.
Can be any valid keras initialization object or string.
For SIREN, use `siren_uniform` for the general initialization,
or `siren_first_uniform` which is specific for first layer.
use_bias: Boolean whether to use bias or not.
# References:
- [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661)
"""
super(SIRENModel, self).__init__(**kwargs)
siren_layers = [siren.SinusodialRepresentationDense(units, w0=w0_initial, use_bias=use_bias,
kernel_initializer=initial_layer_init,
**kwargs)]
for _ in range(num_layers - 1):
siren_layers.append(siren.SinusodialRepresentationDense(units, w0=w0, use_bias=use_bias, **kwargs))
self.siren_layers = tf.keras.Sequential(siren_layers)
self.final_dense = siren.SinusodialRepresentationDense(final_units, activation=final_activation,
use_bias=use_bias, **kwargs)
def call(self, inputs, training=None, mask=None):
features = self.siren_layers(inputs)
output = self.final_dense(features)
return output
class ScaledSIRENModel(tf.keras.Model):
def __init__(self, units: int, final_units: int,
final_activation: str = "linear",
num_layers: int = 1,
w0: float = 30.0,
w0_initial: float = 30.0,
scale: float = 1.0,
scale_initial: float = None,
initial_layer_init: str = 'siren_first_uniform',
use_bias: bool = True, **kwargs):
"""
Scaled SIREN model from the paper [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661).
Used to create a multi-layer MLP using ScaledSinusodialRepresentationDense layers.
Args:
units: Number of hidden units in the intermediate layers.
final_units: Number of hidden units in the final layer.
final_activation: Activation function of the final layer.
num_layers: Number of layers in the network.
w0: w0 in the activation step `act(x; w0) = sin(w0 * x)`.
w0_initial: By default, scales `w0` of first layer to 30 (as used in the paper).
scale: Scale of the kernel matrix prior to matmul.
scale_initial: Scale of the kernel matrix prior to matmul, for the first layer.
By default, uses the `w0_initial` value if not passed a value.
initial_layer_init: Initialization for the first SIREN layer.
Can be any valid keras initialization object or string.
For SIREN, use `siren_uniform` for the general initialization,
or `siren_first_uniform` which is specific for first layer.
use_bias: Boolean whether to use bias or not.
# References:
- [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661)
"""
super(ScaledSIRENModel, self).__init__(**kwargs)
if scale_initial is None:
scale_initial = w0_initial
siren_layers = [siren.ScaledSinusodialRepresentationDense(units, scale=scale_initial, w0=w0_initial,
use_bias=use_bias,
kernel_initializer=initial_layer_init,
**kwargs)]
for _ in range(num_layers - 1):
siren_layers.append(siren.ScaledSinusodialRepresentationDense(units, scale=scale, w0=w0, use_bias=use_bias,
**kwargs))
self.siren_layers = tf.keras.Sequential(siren_layers)
self.final_dense = siren.ScaledSinusodialRepresentationDense(final_units, scale=scale,
activation=final_activation,
use_bias=use_bias, **kwargs)
def call(self, inputs, training=None, mask=None):
features = self.siren_layers(inputs)
output = self.final_dense(features)
return output
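# A small usage sketch (illustrative shapes): map 2-D coordinates in [-1, 1]
# to RGB values with a 4-layer SIREN.
import numpy as np
model = SIRENModel(units=256, final_units=3, final_activation='sigmoid',
                   num_layers=4)
coords = np.random.uniform(-1.0, 1.0, size=(1024, 2)).astype('float32')
rgb = model(coords)  # tensor of shape (1024, 3)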
| 51.772727
| 145
| 0.585601
| 627
| 5,695
| 5.15311
| 0.183413
| 0.038997
| 0.029712
| 0.025998
| 0.851749
| 0.796657
| 0.796657
| 0.796657
| 0.774992
| 0.774992
| 0
| 0.022514
| 0.344864
| 5,695
| 109
| 146
| 52.247706
| 0.843474
| 0.404214
| 0
| 0.627451
| 0
| 0
| 0.016223
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0
| 0.039216
| 0
| 0.196078
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f1100fec036785a8f1f9cc2464d3a13bd228a7ac
| 48
|
py
|
Python
|
ptcaccount2/__init__.py
|
Kitryn/PTCAccount2
|
b51f98cbe47b28a4663e520f5cd9af7f5d2c0013
|
[
"MIT"
] | 96
|
2016-08-13T17:50:05.000Z
|
2021-11-14T15:52:26.000Z
|
ptcaccount2/__init__.py
|
Kitryn/PTCAccount2
|
b51f98cbe47b28a4663e520f5cd9af7f5d2c0013
|
[
"MIT"
] | 38
|
2016-08-14T03:59:20.000Z
|
2017-04-02T10:56:00.000Z
|
ptcaccount2/__init__.py
|
Kitryn/PTCAccount2
|
b51f98cbe47b28a4663e520f5cd9af7f5d2c0013
|
[
"MIT"
] | 50
|
2016-08-13T18:31:30.000Z
|
2017-08-08T19:28:06.000Z
|
from ptcaccount2.accounts import random_account
| 24
| 47
| 0.895833
| 6
| 48
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.083333
| 48
| 1
| 48
| 48
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
74fab786d08151947bb4d06c6592d96772341870
| 34
|
py
|
Python
|
tapiriik/services/BeginnerTriathlete/__init__.py
|
prohfesor/tapiriik
|
0c476f8bb6b3d51674f0117b054777405ff2ee0d
|
[
"Apache-2.0"
] | 1,445
|
2015-01-01T21:43:31.000Z
|
2022-03-17T13:40:23.000Z
|
tapiriik/services/BeginnerTriathlete/__init__.py
|
prohfesor/tapiriik
|
0c476f8bb6b3d51674f0117b054777405ff2ee0d
|
[
"Apache-2.0"
] | 441
|
2015-01-02T03:37:49.000Z
|
2022-03-31T18:18:03.000Z
|
tapiriik/services/BeginnerTriathlete/__init__.py
|
prohfesor/tapiriik
|
0c476f8bb6b3d51674f0117b054777405ff2ee0d
|
[
"Apache-2.0"
] | 333
|
2015-01-06T12:14:15.000Z
|
2022-03-27T19:58:48.000Z
|
from .beginnertriathlete import *
| 17
| 33
| 0.823529
| 3
| 34
| 9.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2d349df5c69876658a0755bb42dfeffd642ee403
| 122
|
py
|
Python
|
py/problem_20.py
|
dfings/project-euler
|
f66389dcd8ff4e4d64fbd245cfdaebac7b9bd4ef
|
[
"Unlicense"
] | null | null | null |
py/problem_20.py
|
dfings/project-euler
|
f66389dcd8ff4e4d64fbd245cfdaebac7b9bd4ef
|
[
"Unlicense"
] | null | null | null |
py/problem_20.py
|
dfings/project-euler
|
f66389dcd8ff4e4d64fbd245cfdaebac7b9bd4ef
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
from math import factorial
from strings import sum_of_digits
print(sum_of_digits(factorial(100)))
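# The local `strings` module is not included here; a plausible implementation
# of its helper (an assumption, not the actual source) would be:
def sum_of_digits_sketch(n: int) -> int:
    return sum(int(d) for d in str(n))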
| 17.428571
| 36
| 0.803279
| 20
| 122
| 4.7
| 0.7
| 0.106383
| 0.234043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027523
| 0.106557
| 122
| 6
| 37
| 20.333333
| 0.834862
| 0.163934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7428c74722f44162aac6f84fb4637c6b625c67f3
| 2,581
|
py
|
Python
|
awx/main/tests/unit/notifications/test_slack.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 2
|
2021-05-18T13:28:49.000Z
|
2021-07-06T14:04:08.000Z
|
awx/main/tests/unit/notifications/test_slack.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 20
|
2019-08-30T17:00:01.000Z
|
2022-03-29T17:58:11.000Z
|
awx/main/tests/unit/notifications/test_slack.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 1
|
2021-04-19T15:15:24.000Z
|
2021-04-19T15:15:24.000Z
|
import pytest
from unittest import mock
from django.core.mail.message import EmailMessage
import awx.main.notifications.slack_backend as slack_backend
def test_send_messages():
with mock.patch('awx.main.notifications.slack_backend.WebClient') as slack_sdk_mock:
WebClient_mock = slack_sdk_mock.return_value
WebClient_mock.chat_postMessage.return_value = {'ok': True}
backend = slack_backend.SlackBackend('slack_access_token')
message = EmailMessage(
'test subject',
'test body',
[],
[
'#random',
],
)
sent_messages = backend.send_messages(
[
message,
]
)
WebClient_mock.chat_postMessage.assert_called_once_with(channel='random', thread_ts=None, as_user=True, text='test subject')
assert sent_messages == 1
def test_send_messages_with_color():
with mock.patch('awx.main.notifications.slack_backend.WebClient') as slack_sdk_mock:
WebClient_mock = slack_sdk_mock.return_value
WebClient_mock.chat_postMessage.return_value = {'ok': True}
backend = slack_backend.SlackBackend('slack_access_token', hex_color='#006699')
message = EmailMessage(
'test subject',
'test body',
[],
[
'#random',
],
)
sent_messages = backend.send_messages(
[
message,
]
)
WebClient_mock.chat_postMessage.assert_called_once_with(
channel='random', as_user=True, thread_ts=None, attachments=[{'color': '#006699', 'text': 'test subject'}]
)
assert sent_messages == 1
def test_send_messages_fail():
with mock.patch('awx.main.notifications.slack_backend.WebClient') as slack_sdk_mock, pytest.raises(RuntimeError, match=r'.*not_in_channel.*'):
WebClient_mock = slack_sdk_mock.return_value
WebClient_mock.chat_postMessage.return_value = {'ok': False, 'error': 'not_in_channel'}
backend = slack_backend.SlackBackend('slack_access_token')
message = EmailMessage(
'test subject',
'test body',
[],
[
'#not_existing',
],
)
sent_messages = backend.send_messages(
[
message,
]
)
WebClient_mock.chat_postMessage.assert_called_once_with(channel='not_existing', as_user=True, text='test subject')
assert sent_messages == 0
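# The three tests above repeat the same WebClient mock setup; a small pytest
# fixture could factor it out (a sketch; the fixture name is illustrative):
@pytest.fixture
def webclient_mock():
    with mock.patch('awx.main.notifications.slack_backend.WebClient') as slack_sdk_mock:
        yield slack_sdk_mock.return_value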
| 33.960526
| 146
| 0.605967
| 267
| 2,581
| 5.535581
| 0.23221
| 0.079161
| 0.048714
| 0.113667
| 0.822057
| 0.784844
| 0.784844
| 0.784844
| 0.784844
| 0.748985
| 0
| 0.008237
| 0.29446
| 2,581
| 75
| 147
| 34.413333
| 0.803405
| 0
| 0
| 0.515152
| 0
| 0
| 0.158078
| 0.053468
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.045455
| false
| 0
| 0.060606
| 0
| 0.106061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
74376fb523d823258e277f0fd770d4f64f996e6f
| 8,872
|
py
|
Python
|
scripts/checks/check_log_files.py
|
STScI-Citizen-Science/MTPipeline
|
903743afe55592ab67a240237d924c7c7383eec7
|
[
"Unlicense"
] | 4
|
2015-10-03T02:30:50.000Z
|
2016-04-07T04:07:19.000Z
|
scripts/checks/check_log_files.py
|
STScI-Citizen-Science/MTPipeline
|
903743afe55592ab67a240237d924c7c7383eec7
|
[
"Unlicense"
] | 3
|
2022-02-10T23:02:22.000Z
|
2022-02-10T23:02:51.000Z
|
scripts/checks/check_log_files.py
|
STScI-Citizen-Science/MTPipeline
|
903743afe55592ab67a240237d924c7c7383eec7
|
[
"Unlicense"
] | null | null | null |
import datetime as dt
import glob
logs_list = glob.glob('/astro/mtpipeline/logs/run_imaging_pipeline/*.log')
total_times_dict = {}
check_log_dict = {'cr_times': {'sum_avg': 0, 'count_avg': 0, 'avg_master': 0, 'sum_var': 0, 'var_master': 0, 'std_master': 0},
'dr_times': {'sum_avg': 0, 'count_avg': 0, 'avg_master': 0, 'sum_var': 0, 'var_master': 0, 'std_master': 0},
'png_times': {'sum_avg': 0, 'count_avg': 0, 'avg_master': 0, 'sum_var': 0, 'var_master': 0, 'std_master': 0}}
for log_file in logs_list:
with open(log_file, 'r') as f:
check_log_dict[log_file] = {'host': {'user': '', 'hostname': '', 'cpu': '', 'memory': '', 'filelist': ''},
'cr_master': {'sum_sec': 0, 'count': 0, 'avg': 0, 'sum_var': 0, 'var': 0, 'std': 0},
'dr_master': {'sum_sec': 0, 'count': 0, 'avg': 0, 'sum_var': 0, 'var': 0, 'std': 0},
'png_master': {'sum_sec': 0, 'count': 0, 'avg': 0, 'sum_var': 0, 'var': 0, 'std': 0}}
file_key = ''
lines = list(f)
for line in lines:
list_line = line.split(' ')
if len(list_line) <= 3:
continue
if 'User:' in line:
check_log_dict[log_file]['host']['user'] = list_line[-1].strip()
if 'Host:' in line:
check_log_dict[log_file]['host']['hostname'] = list_line[-1].replace('\n', '')
if check_log_dict[log_file]['host']['hostname'] not in total_times_dict:
total_times_dict[check_log_dict[log_file]['host']['hostname']] = {'cr': {'avg': [], 'std': []},
'dr': {'avg': [], 'std': []},
'png': {'avg': [], 'std': []},
'info': {'cpu': '', 'memory': '', 'count': 0}}
if 'Count:' in line:
check_log_dict[log_file]['host']['cpu'] = list_line[-1].strip()
if check_log_dict[log_file]['host']['cpu'] != '':
total_times_dict[check_log_dict[log_file]['host']['hostname']]['info']['cpu'] = check_log_dict[log_file]['host']['cpu']
if 'Memory:' in line:
check_log_dict[log_file]['host']['memory'] = list_line[-1].strip()
if check_log_dict[log_file]['host']['memory'] != '':
total_times_dict[check_log_dict[log_file]['host']['hostname']]['info']['memory'] = check_log_dict[log_file]['host']['memory']
if 'filelist:' in line:
check_log_dict[log_file]['host']['filelist'] = list_line[-1].strip()
if 'Current File:' in line:
file_key = list_line[-1].replace('\n', '')
check_log_dict[log_file][file_key] = {'cr_file': {'start': '', 'end': '', 'diff': 0},
'dr_file': {'start': '', 'end': '', 'diff': 0},
'png_file': {'start': '', 'end': '', 'diff': 0}}
if 'Running cr_reject' in line:
check_log_dict[log_file][file_key]['cr_file']['start'] = list_line[-5]
if 'Done running cr_reject' in line:
if check_log_dict[log_file][file_key]['cr_file']['start'] != '':
check_log_dict[log_file][file_key]['cr_file']['end'] = list_line[-6]
start_dt = dt.datetime.strptime(check_log_dict[log_file][file_key]['cr_file']['start'], '%H:%M:%S')
end_dt = dt.datetime.strptime(check_log_dict[log_file][file_key]['cr_file']['end'], '%H:%M:%S')
diff = (end_dt - start_dt)
check_log_dict[log_file][file_key]['cr_file']['diff'] = diff.seconds
check_log_dict[log_file]['cr_master']['sum_sec'] += diff.seconds
check_log_dict[log_file]['cr_master']['count'] += 1
total_times_dict[check_log_dict[log_file]['host']['hostname']]['info']['count'] += 2
if 'Running Astrodrizzle' in line:
check_log_dict[log_file][file_key]['dr_file']['start'] = list_line[-5]
if 'Done running astrodrizzle' in line:
if check_log_dict[log_file][file_key]['dr_file']['start'] != '':
check_log_dict[log_file][file_key]['dr_file']['end'] = list_line[-6]
start_dt = dt.datetime.strptime(check_log_dict[log_file][file_key]['dr_file']['start'], '%H:%M:%S')
end_dt = dt.datetime.strptime(check_log_dict[log_file][file_key]['dr_file']['end'], '%H:%M:%S')
diff = (end_dt - start_dt)
check_log_dict[log_file][file_key]['dr_file']['diff'] = diff.seconds
check_log_dict[log_file]['dr_master']['sum_sec'] += diff.seconds
check_log_dict[log_file]['dr_master']['count'] += 1
total_times_dict[check_log_dict[log_file]['host']['hostname']]['info']['count'] += 8
if 'Running png' in line:
check_log_dict[log_file][file_key]['png_file']['start'] = list_line[-5]
if 'Done running png' in line:
if check_log_dict[log_file][file_key]['png_file']['start'] != '':
check_log_dict[log_file][file_key]['png_file']['end'] = list_line[-6]
start_dt = dt.datetime.strptime(check_log_dict[log_file][file_key]['png_file']['start'], '%H:%M:%S')
end_dt = dt.datetime.strptime(check_log_dict[log_file][file_key]['png_file']['end'], '%H:%M:%S')
diff = (end_dt - start_dt)
check_log_dict[log_file][file_key]['png_file']['diff'] = diff.seconds
check_log_dict[log_file]['png_master']['sum_sec'] += diff.seconds
check_log_dict[log_file]['png_master']['count'] += 1
total_times_dict[check_log_dict[log_file]['host']['hostname']]['info']['count'] += 28
for key in check_log_dict[log_file].keys():
if key in ['cr_master', 'dr_master', 'png_master'] and 'count' in check_log_dict[log_file][key]:
if check_log_dict[log_file][key]['count'] != 0:
check_log_dict[log_file][key]['avg'] = check_log_dict[log_file][key]['sum_sec'] / check_log_dict[log_file][key]['count']
total_times_dict[check_log_dict[log_file]['host']['hostname']][key.split('_')[0]]['avg'].append(check_log_dict[log_file][key]['avg'])
for key in check_log_dict[log_file].keys():
if key not in ['host', 'cr_master', 'dr_master', 'png_master']:
for file_key in check_log_dict[log_file][key].keys():
if file_key == 'cr_file':
check_log_dict[log_file]['cr_master']['sum_var'] += (check_log_dict[log_file][key][file_key]['diff'] - check_log_dict[log_file]['cr_master']['avg'])**2
elif file_key == 'dr_file':
check_log_dict[log_file]['dr_master']['sum_var'] += (check_log_dict[log_file][key][file_key]['diff'] - check_log_dict[log_file]['dr_master']['avg'])**2
else:
check_log_dict[log_file]['png_master']['sum_var'] += (check_log_dict[log_file][key][file_key]['diff'] - check_log_dict[log_file]['png_master']['avg'])**2
for key in check_log_dict[log_file].keys():
if key != 'host' and 'count' in check_log_dict[log_file][key]:
if check_log_dict[log_file][key]['count'] != 0:
check_log_dict[log_file][key]['var'] = check_log_dict[log_file][key]['sum_var'] / check_log_dict[log_file][key]['count']
check_log_dict[log_file][key]['std'] = (check_log_dict[log_file][key]['var'])**0.5
total_times_dict[check_log_dict[log_file]['host']['hostname']][key.split('_')[0]]['std'].append(check_log_dict[log_file][key]['std'])
for key in total_times_dict.keys():
print('Host: {}'.format(key))
for type_key in total_times_dict[key].keys():
avg = 0
std = 0
if type_key != 'info':
if len(total_times_dict[key][type_key]['avg']) != 0:
avg = sum(total_times_dict[key][type_key]['avg']) / float(len(total_times_dict[key][type_key]['avg']))
if len(total_times_dict[key][type_key]['std']) != 0:
std = sum(total_times_dict[key][type_key]['std']) / float(len(total_times_dict[key][type_key]['std']))
print('{0}:\nAVG: {1:.2f}\nSTD: {2:.2f}'.format(type_key, avg, std))
print('CPU: {}\nMemory: {}\nFile Count: {}\n'.format(total_times_dict[key]['info']['cpu'], total_times_dict[key]['info']['memory'], total_times_dict[key]['info']['count']))
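# The manual average/variance bookkeeping above could lean on the stdlib
# instead (a sketch; assumes Python 3 and per-host lists of durations):
import statistics
def summarize(seconds):
    avg = statistics.mean(seconds) if seconds else 0.0
    std = statistics.pstdev(seconds) if seconds else 0.0
    return avg, std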
| 68.775194
| 175
| 0.536632
| 1,197
| 8,872
| 3.636591
| 0.072682
| 0.11739
| 0.198484
| 0.244659
| 0.84838
| 0.795084
| 0.775787
| 0.671491
| 0.5711
| 0.535952
| 0
| 0.01211
| 0.274008
| 8,872
| 129
| 175
| 68.775194
| 0.663717
| 0
| 0
| 0.072072
| 0
| 0
| 0.179646
| 0.005522
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.018018
| null | null | 0.027027
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
74a18839079aeb4c75530fb42cce6b47381b1947
| 161
|
py
|
Python
|
7 kyu/Printer Errors/Printer Errors.py
|
anthonyjatoba/codewars
|
76b0d66dd1ba76a4d136b658920cdf85fd5c4b06
|
[
"MIT"
] | null | null | null |
7 kyu/Printer Errors/Printer Errors.py
|
anthonyjatoba/codewars
|
76b0d66dd1ba76a4d136b658920cdf85fd5c4b06
|
[
"MIT"
] | null | null | null |
7 kyu/Printer Errors/Printer Errors.py
|
anthonyjatoba/codewars
|
76b0d66dd1ba76a4d136b658920cdf85fd5c4b06
|
[
"MIT"
] | null | null | null |
from functools import reduce
from operator import add
def printer_error(s):
return '{}/{}'.format(sum(s.count(e) for e in 'nopqrstuvwxyz'), len(s))
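# Illustrative checks: letters n-z count as printing errors.
assert printer_error('aaabbbbhaijjjm') == '0/14'
assert printer_error('aaaxbbbbyyhwawiwjjjwwm') == '8/22'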
| 26.833333
| 75
| 0.670807
| 24
| 161
| 4.458333
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186335
| 161
| 6
| 76
| 26.833333
| 0.816794
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
77d0254e4121fd97b469171a1a5a2ba7f8790a19
| 140
|
py
|
Python
|
django_cassandra_engine/base/validation.py
|
Saurabh-Singh-00/django-cassandra-engine
|
1028919bff04743dcbf70b849fd19980ee131740
|
[
"BSD-2-Clause"
] | 334
|
2015-01-07T17:22:46.000Z
|
2022-03-11T18:07:00.000Z
|
django_cassandra_engine/base/validation.py
|
hsamfm/django-cassandra-engine
|
f3ad96a00c8d91be9703ee4e4b1b45d4f93cb012
|
[
"BSD-2-Clause"
] | 130
|
2015-01-16T09:59:52.000Z
|
2022-01-18T03:58:36.000Z
|
django_cassandra_engine/base/validation.py
|
hsamfm/django-cassandra-engine
|
f3ad96a00c8d91be9703ee4e4b1b45d4f93cb012
|
[
"BSD-2-Clause"
] | 99
|
2015-01-19T12:16:24.000Z
|
2022-01-29T14:57:51.000Z
|
from django.db.backends.base.validation import BaseDatabaseValidation
class CassandraDatabaseValidation(BaseDatabaseValidation):
pass
| 23.333333
| 69
| 0.857143
| 12
| 140
| 10
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092857
| 140
| 5
| 70
| 28
| 0.944882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
77d075abdbbd01fe4bc2cc518568a951e6457a29
| 45
|
py
|
Python
|
spike_swarm_sim/objectives/__init__.py
|
r-sendra/SpikeSwarmSim
|
a5bd71cb93df0963588640c5d44b3891fa07457c
|
[
"MIT"
] | null | null | null |
spike_swarm_sim/objectives/__init__.py
|
r-sendra/SpikeSwarmSim
|
a5bd71cb93df0963588640c5d44b3891fa07457c
|
[
"MIT"
] | null | null | null |
spike_swarm_sim/objectives/__init__.py
|
r-sendra/SpikeSwarmSim
|
a5bd71cb93df0963588640c5d44b3891fa07457c
|
[
"MIT"
] | null | null | null |
from .fitness import *
from .reward import *
| 22.5
| 23
| 0.733333
| 6
| 45
| 5.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 45
| 2
| 24
| 22.5
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
77ec92a8b745ed5f8866213c9abf6777e78dcb28
| 29
|
py
|
Python
|
kalliope/tts/acapela/__init__.py
|
Z3RO1/KalliopeZERO
|
4a9d4660e561f43387e8de4616f530ecf36cbbae
|
[
"MIT"
] | 1
|
2017-10-09T18:02:32.000Z
|
2017-10-09T18:02:32.000Z
|
kalliope/tts/acapela/__init__.py
|
ngoales/kalliope
|
b1e58f2d1e949f572d48026603159992c0ce20ca
|
[
"MIT"
] | null | null | null |
kalliope/tts/acapela/__init__.py
|
ngoales/kalliope
|
b1e58f2d1e949f572d48026603159992c0ce20ca
|
[
"MIT"
] | 1
|
2021-11-21T19:08:15.000Z
|
2021-11-21T19:08:15.000Z
|
from .acapela import Acapela
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bb45ed940746e81f211204d77cc77528b107213d
| 17,748
|
py
|
Python
|
src/datadog_api_client/v2/api/logs_metrics_api.py
|
rchenzheng/datadog-api-client-python
|
2e86ac098c6f0c7fdd90ed218224587c0f8eafef
|
[
"Apache-2.0"
] | 32
|
2021-01-07T15:09:56.000Z
|
2022-01-30T05:49:23.000Z
|
src/datadog_api_client/v2/api/logs_metrics_api.py
|
rchenzheng/datadog-api-client-python
|
2e86ac098c6f0c7fdd90ed218224587c0f8eafef
|
[
"Apache-2.0"
] | 228
|
2020-09-03T14:03:54.000Z
|
2022-03-31T20:16:12.000Z
|
src/datadog_api_client/v2/api/logs_metrics_api.py
|
rchenzheng/datadog-api-client-python
|
2e86ac098c6f0c7fdd90ed218224587c0f8eafef
|
[
"Apache-2.0"
] | 12
|
2020-09-15T21:36:03.000Z
|
2022-03-31T17:13:17.000Z
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v2.api_client import ApiClient, Endpoint as _Endpoint
from datadog_api_client.v2.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types,
)
from datadog_api_client.v2.model.api_error_response import APIErrorResponse
from datadog_api_client.v2.model.logs_metric_create_request import LogsMetricCreateRequest
from datadog_api_client.v2.model.logs_metric_response import LogsMetricResponse
from datadog_api_client.v2.model.logs_metric_update_request import LogsMetricUpdateRequest
from datadog_api_client.v2.model.logs_metrics_response import LogsMetricsResponse
class LogsMetricsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self._create_logs_metric_endpoint = _Endpoint(
settings={
"response_type": (LogsMetricResponse,),
"auth": ["apiKeyAuth", "appKeyAuth"],
"endpoint_path": "/api/v2/logs/config/metrics",
"operation_id": "create_logs_metric",
"http_method": "POST",
"servers": None,
},
params_map={
"all": [
"body",
],
"required": [
"body",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"body": (LogsMetricCreateRequest,),
},
"attribute_map": {},
"location_map": {
"body": "body",
},
"collection_format_map": {},
},
headers_map={"accept": ["application/json"], "content_type": ["application/json"]},
api_client=api_client,
)
self._delete_logs_metric_endpoint = _Endpoint(
settings={
"response_type": None,
"auth": ["apiKeyAuth", "appKeyAuth"],
"endpoint_path": "/api/v2/logs/config/metrics/{metric_id}",
"operation_id": "delete_logs_metric",
"http_method": "DELETE",
"servers": None,
},
params_map={
"all": [
"metric_id",
],
"required": [
"metric_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"metric_id": (str,),
},
"attribute_map": {
"metric_id": "metric_id",
},
"location_map": {
"metric_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self._get_logs_metric_endpoint = _Endpoint(
settings={
"response_type": (LogsMetricResponse,),
"auth": ["apiKeyAuth", "appKeyAuth"],
"endpoint_path": "/api/v2/logs/config/metrics/{metric_id}",
"operation_id": "get_logs_metric",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"metric_id",
],
"required": [
"metric_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"metric_id": (str,),
},
"attribute_map": {
"metric_id": "metric_id",
},
"location_map": {
"metric_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self._list_logs_metrics_endpoint = _Endpoint(
settings={
"response_type": (LogsMetricsResponse,),
"auth": ["apiKeyAuth", "appKeyAuth"],
"endpoint_path": "/api/v2/logs/config/metrics",
"operation_id": "list_logs_metrics",
"http_method": "GET",
"servers": None,
},
params_map={"all": [], "required": [], "nullable": [], "enum": [], "validation": []},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {},
"attribute_map": {},
"location_map": {},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self._update_logs_metric_endpoint = _Endpoint(
settings={
"response_type": (LogsMetricResponse,),
"auth": ["apiKeyAuth", "appKeyAuth"],
"endpoint_path": "/api/v2/logs/config/metrics/{metric_id}",
"operation_id": "update_logs_metric",
"http_method": "PATCH",
"servers": None,
},
params_map={
"all": [
"metric_id",
"body",
],
"required": [
"metric_id",
"body",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"metric_id": (str,),
"body": (LogsMetricUpdateRequest,),
},
"attribute_map": {
"metric_id": "metric_id",
},
"location_map": {
"metric_id": "path",
"body": "body",
},
"collection_format_map": {},
},
headers_map={"accept": ["application/json"], "content_type": ["application/json"]},
api_client=api_client,
)
def create_logs_metric(self, body, **kwargs):
"""Create a log-based metric # noqa: E501
Create a metric based on your ingested logs in your organization. Returns the log-based metric object from the request body when the request is successful. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_logs_metric(body, async_req=True)
>>> result = thread.get()
Args:
body (LogsMetricCreateRequest): The definition of the new log-based metric.
Keyword Args:
_return_http_data_only (bool): return the response data only,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is used as the total request timeout. It
can also be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
LogsMetricResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs = self._create_logs_metric_endpoint.default_arguments(kwargs)
kwargs["body"] = body
return self._create_logs_metric_endpoint.call_with_http_info(**kwargs)
def delete_logs_metric(self, metric_id, **kwargs):
"""Delete a log-based metric # noqa: E501
Delete a specific log-based metric from your organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_logs_metric(metric_id, async_req=True)
>>> result = thread.get()
Args:
metric_id (str): The name of the log-based metric.
Keyword Args:
_return_http_data_only (bool): return the response data only,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is used as the total request timeout. It
can also be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs = self._delete_logs_metric_endpoint.default_arguments(kwargs)
kwargs["metric_id"] = metric_id
return self._delete_logs_metric_endpoint.call_with_http_info(**kwargs)
def get_logs_metric(self, metric_id, **kwargs):
"""Get a log-based metric # noqa: E501
Get a specific log-based metric from your organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_logs_metric(metric_id, async_req=True)
>>> result = thread.get()
Args:
metric_id (str): The name of the log-based metric.
Keyword Args:
_return_http_data_only (bool): return the response data only,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is used as the total request timeout. It
can also be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
LogsMetricResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs = self._get_logs_metric_endpoint.default_arguments(kwargs)
kwargs["metric_id"] = metric_id
return self._get_logs_metric_endpoint.call_with_http_info(**kwargs)
def list_logs_metrics(self, **kwargs):
"""Get all log-based metrics # noqa: E501
Get the list of configured log-based metrics with their definitions. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_logs_metrics(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): return the response data only,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is used as the total request timeout. It
can also be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
LogsMetricsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs = self._list_logs_metrics_endpoint.default_arguments(kwargs)
return self._list_logs_metrics_endpoint.call_with_http_info(**kwargs)
def update_logs_metric(self, metric_id, body, **kwargs):
"""Update a log-based metric # noqa: E501
Update a specific log-based metric from your organization. Returns the log-based metric object from the request body when the request is successful. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_logs_metric(metric_id, body, async_req=True)
>>> result = thread.get()
Args:
metric_id (str): The name of the log-based metric.
body (LogsMetricUpdateRequest): New definition of the log-based metric.
Keyword Args:
_return_http_data_only (bool): return the response data only,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is used as the total request timeout. It
can also be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
LogsMetricResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs = self._update_logs_metric_endpoint.default_arguments(kwargs)
kwargs["metric_id"] = metric_id
kwargs["body"] = body
return self._update_logs_metric_endpoint.call_with_http_info(**kwargs)
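A minimal usage sketch for the generated class above, assuming the v1-era datadog-api-client-python pattern where Configuration and ApiClient are importable from the v2 package and the API/app keys are picked up from the DD_API_KEY / DD_APP_KEY environment variables; this is not part of the generated file.

from datadog_api_client.v2 import ApiClient, Configuration
from datadog_api_client.v2.api import logs_metrics_api

configuration = Configuration()
with ApiClient(configuration) as api_client:
    api = logs_metrics_api.LogsMetricsApi(api_client)
    response = api.list_logs_metrics()  # returns a LogsMetricsResponse
    print(response)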
| 40.706422
| 177
| 0.547893
| 1,810
| 17,748
| 5.166851
| 0.11989
| 0.030796
| 0.027802
| 0.020317
| 0.842387
| 0.81127
| 0.786784
| 0.766788
| 0.738345
| 0.724551
| 0
| 0.005524
| 0.367591
| 17,748
| 435
| 178
| 40.8
| 0.827691
| 0.455432
| 0
| 0.594595
| 0
| 0
| 0.20554
| 0.033097
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.040541
| 0
| 0.094595
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
249cdecd056bc3b08f9ca1aaa1e10075ec8f6432
| 121
|
py
|
Python
|
tests/test_rest.py
|
cadia-lvl/POS
|
81a146707df82ecc972c28e055cc50b372f5d35b
|
[
"Apache-2.0"
] | 2
|
2020-07-28T14:10:02.000Z
|
2021-08-25T13:28:14.000Z
|
tests/test_rest.py
|
cadia-lvl/POS
|
81a146707df82ecc972c28e055cc50b372f5d35b
|
[
"Apache-2.0"
] | 3
|
2020-07-15T15:17:37.000Z
|
2021-11-08T15:49:02.000Z
|
tests/test_rest.py
|
cadia-lvl/POS
|
81a146707df82ecc972c28e055cc50b372f5d35b
|
[
"Apache-2.0"
] | 1
|
2020-07-15T16:15:05.000Z
|
2020-07-15T16:15:05.000Z
|
from pos import cli, vectorize_dim
def test_import():
assert True  # Should just run; we only want the imports to succeed
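A slightly broader smoke test in the same spirit, sketched with importlib so each failing import is reported per parameter; the module list here is an assumption and can be extended with submodules.

import importlib

import pytest


@pytest.mark.parametrize("module_name", ["pos"])  # extend with submodules as needed
def test_module_importable(module_name):
    importlib.import_module(module_name)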
| 20.166667
| 64
| 0.727273
| 21
| 121
| 4.095238
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223141
| 121
| 5
| 65
| 24.2
| 0.914894
| 0.371901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.666667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
24a7d3b57ddc6c28c9a326500cd5083a67e456dd
| 34
|
py
|
Python
|
dnfal/encoding/__init__.py
|
altest-com/dnfal
|
d1fb15508c5583aeaa0957fcc3e37634d36bf237
|
[
"MIT"
] | null | null | null |
dnfal/encoding/__init__.py
|
altest-com/dnfal
|
d1fb15508c5583aeaa0957fcc3e37634d36bf237
|
[
"MIT"
] | 1
|
2020-03-31T17:04:09.000Z
|
2020-03-31T17:04:09.000Z
|
dnfal/encoding/__init__.py
|
altest-com/dnfal
|
d1fb15508c5583aeaa0957fcc3e37634d36bf237
|
[
"MIT"
] | null | null | null |
from ._encoder import FaceEncoder
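A small, hedged addition that often accompanies re-export stubs like the one above: an explicit __all__ makes the exported surface visible to linters and to wildcard importers, assuming FaceEncoder is the only name this package intends to expose.

__all__ = ["FaceEncoder"]  # assumption: the package's only intended public name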
| 17
| 33
| 0.852941
| 4
| 34
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
70294dee9515f01d39750cff334fcc59c544655d
| 172
|
py
|
Python
|
plugins/release_matching/matching.py
|
jerryrwu/harvest
|
6f405254fef59c84637bc976c252eef703b1cbc5
|
[
"Apache-2.0"
] | 9
|
2019-03-26T14:50:00.000Z
|
2020-11-10T16:44:08.000Z
|
plugins/release_matching/matching.py
|
jerryrwu/harvest
|
6f405254fef59c84637bc976c252eef703b1cbc5
|
[
"Apache-2.0"
] | 22
|
2019-03-02T23:16:13.000Z
|
2022-02-27T10:36:36.000Z
|
plugins/release_matching/matching.py
|
jerryrwu/harvest
|
6f405254fef59c84637bc976c252eef703b1cbc5
|
[
"Apache-2.0"
] | 5
|
2019-04-24T00:51:30.000Z
|
2020-11-06T18:31:49.000Z
|
def find_matches(match_info, results):
for target_match_info, obj in results:
if target_match_info.equals(match_info):
yield target_match_info, obj
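A hedged usage sketch for the generator above; FakeMatchInfo is a hypothetical stand-in for whatever match-info type the plugin uses, and only needs the equals() method the generator relies on.

class FakeMatchInfo:
    """Hypothetical stand-in for the real match-info type."""

    def __init__(self, key):
        self.key = key

    def equals(self, other):
        return self.key == other.key


results = [(FakeMatchInfo("a"), "release-a"), (FakeMatchInfo("b"), "release-b")]
# find_matches yields only the pairs whose match info equals the query.
assert [obj for _, obj in find_matches(FakeMatchInfo("a"), results)] == ["release-a"]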
| 34.4
| 48
| 0.72093
| 25
| 172
| 4.6
| 0.52
| 0.391304
| 0.391304
| 0.313043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215116
| 172
| 4
| 49
| 43
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7052500f4ec53ee74cccaee715d86b9fd0e25839
| 26
|
py
|
Python
|
intel_edison/__init__.py
|
abhirocks1211/countly-sdk-iot-python
|
0ccc5120661c5e356d6a569b31ba5fb135fa8efb
|
[
"MIT"
] | 9
|
2016-04-06T05:23:43.000Z
|
2022-02-21T04:41:47.000Z
|
intel_edison/__init__.py
|
abhirocks1211/countly-sdk-iot-python
|
0ccc5120661c5e356d6a569b31ba5fb135fa8efb
|
[
"MIT"
] | 7
|
2016-01-07T22:09:48.000Z
|
2016-02-16T12:44:09.000Z
|
intel_edison/__init__.py
|
abhirocks1211/countly-sdk-iot-python
|
0ccc5120661c5e356d6a569b31ba5fb135fa8efb
|
[
"MIT"
] | 11
|
2016-03-17T14:03:44.000Z
|
2022-02-28T05:32:03.000Z
|
from intel_edison import *
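One note on the line above: it star-imports the intel_edison package from inside its own __init__.py, which only resolves when an identically named top-level module shadows the package on sys.path. A relative import of a concrete submodule is the more conventional form; the target name below is hypothetical, as the real submodule is not shown in the source.

# from ._intel_edison import *  # hypothetical submodule name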
| 26
| 26
| 0.846154
| 4
| 26
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7060ad52e2f5d16a49ba2084ecb4dfca04badf3d
| 121
|
py
|
Python
|
python-aws-lambda/lambda_function.py
|
alexhwoods/deployment-examples
|
e4a94220b8d483dff3268eb3b07f5e19934c861b
|
[
"MIT"
] | 28
|
2018-03-05T09:37:23.000Z
|
2021-04-13T20:47:58.000Z
|
python-aws-lambda/lambda_function.py
|
alexhwoods/deployment-examples
|
e4a94220b8d483dff3268eb3b07f5e19934c861b
|
[
"MIT"
] | 162
|
2018-04-08T15:08:28.000Z
|
2022-03-20T12:07:12.000Z
|
python-aws-lambda/lambda_function.py
|
alexhwoods/deployment-examples
|
e4a94220b8d483dff3268eb3b07f5e19934c861b
|
[
"MIT"
] | 39
|
2018-06-27T09:44:43.000Z
|
2022-03-02T14:25:31.000Z
|
from rook.serverless import serverless_rook
@serverless_rook
def lambda_handler(event, context):
return "Hello world"
| 20.166667
| 43
| 0.818182
| 16
| 121
| 6
| 0.75
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115702
| 121
| 5
| 44
| 24.2
| 0.897196
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
707f3fd888811727c63653d6e8f2059e2011aa73
| 152
|
py
|
Python
|
jumpscale/clients/sshkey/__init__.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 2
|
2021-04-28T10:46:08.000Z
|
2021-12-22T12:33:34.000Z
|
jumpscale/clients/sshkey/__init__.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 321
|
2020-06-15T11:48:21.000Z
|
2022-03-29T22:13:33.000Z
|
jumpscale/clients/sshkey/__init__.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 4
|
2020-06-18T06:19:29.000Z
|
2021-07-14T12:54:47.000Z
|
def export_module_as():
from jumpscale.core.base import StoredFactory
from .sshkey import SSHKeyClient
return StoredFactory(SSHKeyClient)
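A hedged usage sketch, assuming the js-ng convention that the StoredFactory returned above creates and persists named client instances through get(); the instance name is illustrative only.

factory = export_module_as()
my_key = factory.get("my_sshkey")  # hypothetical instance name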
| 21.714286
| 49
| 0.776316
| 17
| 152
| 6.823529
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171053
| 152
| 6
| 50
| 25.333333
| 0.920635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
708269b312e9b8e67e299601351fc1adc81322d6
| 23,537
|
py
|
Python
|
code/set_up_experiments.py
|
lingo-mit/transformers
|
478fb18a9f9680321f0d37dc999ea444e9287cc0
|
[
"Apache-2.0"
] | null | null | null |
code/set_up_experiments.py
|
lingo-mit/transformers
|
478fb18a9f9680321f0d37dc999ea444e9287cc0
|
[
"Apache-2.0"
] | null | null | null |
code/set_up_experiments.py
|
lingo-mit/transformers
|
478fb18a9f9680321f0d37dc999ea444e9287cc0
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
TRAIN_SCRIPT_TEMPLATE = "python transformers/examples/language-modeling/run_language_modeling.py --model_type gpt2 --tokenizer_name model-configs/{0}-config --config_name model-configs/{0}-config/config.json --train_data_file ../data/wikitext-103-raw/wiki.train.raw --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir train-outputs/{1}/model --do_train --do_eval --evaluate_during_training --per_device_train_batch_size 3 --per_device_eval_batch_size 3 --num_train_epochs 10 --dataloader_drop_last --save_steps 500 --save_total_limit 20 --augmented --augmentation_function {2} --train_function {3} --eval_function {4}"
TRAIN_SCRIPT_TEMPLATE_13 = "python transformers/examples/language-modeling/run_language_modeling.py --model_type gpt2 --tokenizer_name model-configs/{0}-config --config_name model-configs/{0}-config/config.json --train_data_file ../data/wikitext-103-raw/wiki.train.raw --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir train-outputs/{1}/13-model --do_train --do_eval --evaluate_during_training --per_device_train_batch_size 3 --per_device_eval_batch_size 3 --num_train_epochs 10 --dataloader_drop_last --save_steps 500 --save_total_limit 20 --augmented --augmentation_function {2} --train_function {3} --eval_function {4} --seed 13"
TRAIN_SCRIPT_TEMPLATE_7 = "python transformers/examples/language-modeling/run_language_modeling.py --model_type gpt2 --tokenizer_name model-configs/{0}-config --config_name model-configs/{0}-config/config.json --train_data_file ../data/wikitext-103-raw/wiki.train.raw --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir train-outputs/{1}/7-model --do_train --do_eval --evaluate_during_training --per_device_train_batch_size 3 --per_device_eval_batch_size 3 --num_train_epochs 10 --dataloader_drop_last --save_steps 500 --save_total_limit 20 --augmented --augmentation_function {2} --train_function {3} --eval_function {4} --seed 7"
EVAL_SCRIPT_256_TEMPLATE = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/{1}/{2}-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
EVAL_SCRIPT_256_TEMPLATE_13 = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/13-model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/{1}/13-{2}-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
EVAL_SCRIPT_256_TEMPLATE_7 = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/7-model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/{1}/7-{2}-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
EVAL_SCRIPT_FIRST_256_TEMPLATE = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/{1}/{2}-first-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
EVAL_SCRIPT_FIRST_256_TEMPLATE_13 = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/13-model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/{1}/13-{2}-first-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
EVAL_SCRIPT_FIRST_256_TEMPLATE_7 = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/7-model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/{1}/7-{2}-first-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
EVAL_SCRIPT_FULL_TEMPLATE = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/{1}/{2}-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
EVAL_SCRIPT_FULL_TEMPLATE_13 = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/13-model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/{1}/13-{2}-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
EVAL_SCRIPT_FULL_TEMPLATE_7 = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/7-model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/{1}/7-{2}-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
TEST_SCRIPT_256_TEMPLATE = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.test.raw --output_dir test-outputs/{1}/{2}-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
TEST_SCRIPT_256_TEMPLATE_13 = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/13-model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.test.raw --output_dir test-outputs/{1}/13-{2}-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
TEST_SCRIPT_256_TEMPLATE_7 = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/7-model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.test.raw --output_dir test-outputs/{1}/7-{2}-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
TEST_SCRIPT_FULL_TEMPLATE = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.test.raw --output_dir test-outputs/{1}/{2}-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
TEST_SCRIPT_FULL_TEMPLATE_13 = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/13-model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.test.raw --output_dir test-outputs/{1}/13-{2}-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
TEST_SCRIPT_FULL_TEMPLATE_7 = "python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/{1}/7-model --tokenizer_name model-configs/{0}-config --eval_data_file ../data/wikitext-103-raw/wiki.test.raw --output_dir test-outputs/{1}/7-{2}-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function {3} --eval_function {4}"
def set_up_experiments(models):
if not os.path.isdir("train-scripts"):
os.mkdir("train-scripts")
if not os.path.isdir("eval-scripts"):
os.mkdir("eval-scripts")
if not os.path.isdir("test-scripts"):
os.mkdir("test-scripts")
for model in models:
make_train_scripts(model, models)
make_eval_scripts(model, models)
make_test_scripts(model, models)
def make_train_scripts(model, models):
if not os.path.isdir("train-scripts/{}".format(model)):
os.mkdir("train-scripts/{}".format(model))
if not os.path.exists("train-scripts/{}/train.sh".format(model)):
train_script = TRAIN_SCRIPT_TEMPLATE.format(models[model]["size"],
model,
models[model]["augmentation_function"],
models[model]["train_function"],
models[model]["eval_function"])
with open("train-scripts/{}/train.sh".format(model), "x") as f:
f.write(train_script)
if not os.path.exists("train-scripts/{}/train-13.sh".format(model)):
train_script_13 = TRAIN_SCRIPT_TEMPLATE_13.format(models[model]["size"],
model,
models[model]["augmentation_function"],
models[model]["train_function"],
models[model]["eval_function"])
with open("train-scripts/{}/train-13.sh".format(model), "x") as f:
f.write(train_script_13)
if not os.path.exists("train-scripts/{}/train-7.sh".format(model)):
train_script_7 = TRAIN_SCRIPT_TEMPLATE_7.format(models[model]["size"],
model,
models[model]["augmentation_function"],
models[model]["train_function"],
models[model]["eval_function"])
with open("train-scripts/{}/train-7.sh".format(model), "x") as f:
f.write(train_script_7)
def make_eval_scripts(model, models):
if not os.path.isdir("eval-scripts/{}".format(model)):
os.mkdir("eval-scripts/{}".format(model))
for other_model in models:
if models[model]["size"] != models[other_model]["size"]:
continue
if not os.path.exists("eval-scripts/{}/{}-256.sh".format(model, other_model)):
eval_script_256 = EVAL_SCRIPT_256_TEMPLATE.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_256"],
models[other_model]["eval_function_256"])
with open("eval-scripts/{}/{}-256.sh".format(model, other_model), "x") as f:
f.write(eval_script_256)
if not os.path.exists("eval-scripts/{}/{}-256-13.sh".format(model, other_model)):
eval_script_256_13 = EVAL_SCRIPT_256_TEMPLATE_13.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_256"],
models[other_model]["eval_function_256"])
with open("eval-scripts/{}/{}-256-13.sh".format(model, other_model), "x") as f:
f.write(eval_script_256_13)
if not os.path.exists("eval-scripts/{}/{}-256-7.sh".format(model, other_model)):
eval_script_256_7 = EVAL_SCRIPT_256_TEMPLATE_7.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_256"],
models[other_model]["eval_function_256"])
with open("eval-scripts/{}/{}-256-7.sh".format(model, other_model), "x") as f:
f.write(eval_script_256_7)
if not os.path.exists("eval-scripts/{}/{}-first-256.sh".format(model, other_model)):
first_256_eval_function = "penultimate_sixth_eval" if models[model]["size"] == 1536 else "penultimate_quarter_eval"
eval_script_first_256 = EVAL_SCRIPT_FIRST_256_TEMPLATE.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_256"],
first_256_eval_function)
with open("eval-scripts/{}/{}-first-256.sh".format(model, other_model), "x") as f:
f.write(eval_script_first_256)
if not os.path.exists("eval-scripts/{}/{}-first-256-13.sh".format(model, other_model)):
first_256_eval_function_13 = "penultimate_sixth_eval" if models[model]["size"] == 1536 else "penultimate_quarter_eval"
eval_script_first_256_13 = EVAL_SCRIPT_FIRST_256_TEMPLATE_13.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_256"],
first_256_eval_function_13)
with open("eval-scripts/{}/{}-first-256-13.sh".format(model, other_model), "x") as f:
f.write(eval_script_first_256_13)
if not os.path.exists("eval-scripts/{}/{}-first-256-7.sh".format(model, other_model)):
first_256_eval_function_7 = "penultimate_sixth_eval" if models[model]["size"] == 1536 else "penultimate_quarter_eval"
eval_script_first_256_7 = EVAL_SCRIPT_FIRST_256_TEMPLATE_7.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_256"],
first_256_eval_function_7)
with open("eval-scripts/{}/{}-first-256-7.sh".format(model, other_model), "x") as f:
f.write(eval_script_first_256_7)
if not os.path.exists("eval-scripts/{}/{}-1.sh".format(model, other_model)):
eval_script_full = EVAL_SCRIPT_FULL_TEMPLATE.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_full"],
models[other_model]["eval_function_full"])
with open("eval-scripts/{}/{}-1.sh".format(model, other_model), "x") as f:
f.write(eval_script_full)
if not os.path.exists("eval-scripts/{}/{}-1-13.sh".format(model, other_model)):
eval_script_full_13 = EVAL_SCRIPT_FULL_TEMPLATE_13.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_full"],
models[other_model]["eval_function_full"])
with open("eval-scripts/{}/{}-1-13.sh".format(model, other_model), "x") as f:
f.write(eval_script_full_13)
if not os.path.exists("eval-scripts/{}/{}-1-7.sh".format(model, other_model)):
eval_script_full_7 = EVAL_SCRIPT_FULL_TEMPLATE_7.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_full"],
models[other_model]["eval_function_full"])
with open("eval-scripts/{}/{}-1-7.sh".format(model, other_model), "x") as f:
f.write(eval_script_full_7)
def make_test_scripts(model, models):
if not os.path.isdir("test-scripts/{}".format(model)):
os.mkdir("test-scripts/{}".format(model))
for other_model in models:
if models[model]["size"] != models[other_model]["size"]:
continue
if not os.path.exists("test-scripts/{}/{}-256.sh".format(model, other_model)):
test_script_256 = TEST_SCRIPT_256_TEMPLATE.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_256"],
models[other_model]["eval_function_256"])
with open("test-scripts/{}/{}-256.sh".format(model, other_model), "x") as f:
f.write(test_script_256)
if not os.path.exists("test-scripts/{}/{}-256-13.sh".format(model, other_model)):
test_script_256_13 = TEST_SCRIPT_256_TEMPLATE_13.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_256"],
models[other_model]["eval_function_256"])
with open("test-scripts/{}/{}-256-13.sh".format(model, other_model), "x") as f:
f.write(test_script_256_13)
if not os.path.exists("test-scripts/{}/{}-256-7.sh".format(model, other_model)):
test_script_256_7 = TEST_SCRIPT_256_TEMPLATE_7.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_256"],
models[other_model]["eval_function_256"])
with open("test-scripts/{}/{}-256-7.sh".format(model, other_model), "x") as f:
f.write(test_script_256_7)
if not os.path.exists("test-scripts/{}/{}-1.sh".format(model, other_model)):
test_script_full = TEST_SCRIPT_FULL_TEMPLATE.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_full"],
models[other_model]["eval_function_full"])
with open("test-scripts/{}/{}-1.sh".format(model, other_model), "x") as f:
f.write(test_script_full)
if not os.path.exists("test-scripts/{}/{}-1-13.sh".format(model, other_model)):
test_script_full_13 = TEST_SCRIPT_FULL_TEMPLATE_13.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_full"],
models[other_model]["eval_function_full"])
with open("test-scripts/{}/{}-1-13.sh".format(model, other_model), "x") as f:
f.write(test_script_full_13)
if not os.path.exists("test-scripts/{}/{}-1-7.sh".format(model, other_model)):
test_script_full_7 = TEST_SCRIPT_FULL_TEMPLATE_7.format(models[model]["size"],
model,
other_model,
models[other_model]["augmentation_function_full"],
models[other_model]["eval_function_full"])
with open("test-scripts/{}/{}-1-7.sh".format(model, other_model), "x") as f:
f.write(test_script_full_7)
if __name__ == '__main__':
with open("models.txt", "r") as f:
models_list = f.readlines()
models = {}
for m in models_list[1:]:
model, a_f, t_f, e_f = map(lambda x: x.strip(), m.split(","))
size = sum(int(x) for x in model.split("-")[0].split("+"))
if size == 1024:
# if "identity" in a_f:
# a_f_256 = "identity_quarter"
# else:
if "identity" in a_f and "old" in a_f:
a_f_256 = "identity_old_quarter"
else:
a_f_256 = a_f + "_quarter"
e_f_256 = "last_quarter_eval"
elif size == 1536:
if "identity_third" == a_f:
a_f_256 = "identity_sixth"
else:
a_f_256 = a_f + "_sixth"
e_f_256 = "last_sixth_eval"
else:
raise ValueError("Invalid model size {}".format(size))
if "identity" in a_f and "old" not in a_f:
a_f_full = "identity_full"
else:
a_f_full = a_f + "_full"
e_f_full = "last_element_eval"
models[model] = {"size": size,
"augmentation_function": a_f,
"train_function": t_f,
"eval_function": e_f,
"augmentation_function_256": a_f_256,
"eval_function_256": e_f_256,
"augmentation_function_full": a_f_full,
"eval_function_full": e_f_full}
set_up_experiments(models)
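The expected models.txt layout can be inferred from the parsing loop above: a header line that is skipped, then one comma-separated row per model whose leading size segment must sum to 1024 or 1536. A sketch with a hypothetical row:

# models.txt (inferred layout; the row values are hypothetical):
#
#   model,augmentation_function,train_function,eval_function
#   768+256-baseline,identity,default_train,default_eval
#
# "768+256" sums to 1024, so this row takes the size == 1024 branch above.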
| 83.169611
| 652
| 0.569104
| 2,736
| 23,537
| 4.580775
| 0.044591
| 0.06064
| 0.053858
| 0.043086
| 0.937685
| 0.915982
| 0.905769
| 0.889731
| 0.834836
| 0.793346
| 0
| 0.042589
| 0.31066
| 23,537
| 282
| 653
| 83.464539
| 0.729861
| 0.002549
| 0
| 0.346491
| 0
| 0.078947
| 0.426873
| 0.274571
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017544
| false
| 0
| 0.008772
| 0
| 0.026316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5608133a2c0c4e8fe03f3cc2da06722832892404
| 20,320
|
py
|
Python
|
tests/unit/engine/test_generate_passphrases.py
|
openstack/airship-pegleg
|
cbc87967ebd572463a893b03097b615b99d9dbcf
|
[
"Apache-2.0"
] | 9
|
2018-06-20T20:16:29.000Z
|
2019-03-24T23:07:25.000Z
|
tests/unit/engine/test_generate_passphrases.py
|
openstack/airship-pegleg
|
cbc87967ebd572463a893b03097b615b99d9dbcf
|
[
"Apache-2.0"
] | 8
|
2020-11-16T16:22:58.000Z
|
2021-05-14T13:29:45.000Z
|
tests/unit/engine/test_generate_passphrases.py
|
airshipit/pegle
|
772d3a47a6db425be8249f770a732dbad4e00b08
|
[
"Apache-2.0"
] | 2
|
2020-03-02T13:53:53.000Z
|
2021-07-19T05:02:13.000Z
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import tempfile
from unittest import mock
import uuid
from cryptography import fernet
import pytest
from testfixtures import log_capture
import yaml
from pegleg.engine.generators.passphrase_generator import PassphraseGenerator
from pegleg.engine.util.cryptostring import CryptoString
from pegleg.engine.util import encryption
from pegleg.engine import util
import pegleg
TEST_PASSPHRASES_CATALOG = yaml.safe_load(
"""
---
schema: pegleg/PassphraseCatalog/v1
metadata:
schema: metadata/Document/v1
name: cluster-passphrases
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
passphrases:
- description: 'short description of the passphrase'
document_name: ceph_swift_keystone_password
encrypted: true
- description: 'short description of the passphrase'
document_name: ucp_keystone_admin_password
encrypted: true
length: 24
- description: 'short description of the passphrase'
document_name: osh_barbican_oslo_db_password
encrypted: true
length: 23
- description: 'short description of the passphrase'
document_name: osh_cinder_password
encrypted: true
length: 25
- description: 'short description of the passphrase'
document_name: osh_oslo_db_admin_password
encrypted: true
length: 0
- description: 'short description of the passphrase'
document_name: osh_placement_password
encrypted: true
length: 32
...
""")
TEST_OVERRIDE_PASSPHRASES_CATALOG = yaml.safe_load(
"""
---
schema: pegleg/PassphraseCatalog/v1
metadata:
schema: metadata/Document/v1
name: cluster-passphrases
layeringDefinition:
abstract: false
layer: site
storagePolicy: cleartext
data:
passphrases:
- description: 'short description of the passphrase'
document_name: ucp_keystone_admin_password
encrypted: true
length: 24
- description: 'short description of the passphrase'
document_name: osh_cinder_password
encrypted: true
length: 25
- description: 'short description of the passphrase'
document_name: osh_placement_password
encrypted: true
length: 32
...
""")
TEST_GLOBAL_PASSPHRASES_CATALOG = yaml.safe_load(
"""
---
schema: pegleg/PassphraseCatalog/v1
metadata:
schema: metadata/Document/v1
name: cluster-passphrases
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
passphrases:
- description: 'description of passphrase from global'
document_name: passphrase_from_global
encrypted: true
...
""")
TEST_TYPES_CATALOG = yaml.safe_load(
"""
---
schema: pegleg/PassphraseCatalog/v1
metadata:
schema: metadata/Document/v1
name: cluster-passphrases
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
passphrases:
- description: 'description of base64 required passphrases'
document_name: base64_encoded_passphrase_doc
encrypted: true
type: base64
- description: 'description of uuid secret'
document_name: uuid_passphrase_doc
encrypted: true
encoding: none
type: uuid
- description: 'description of random passphrase'
document_name: passphrase_doc
encrypted: true
type: passphrase
- description: 'description of default random passphrase'
document_name: default_passphrase_doc
encrypted: true
...
""")
TEST_PROFILES_CATALOG = yaml.safe_load(
"""
---
schema: pegleg/PassphraseCatalog/v1
metadata:
schema: metadata/Document/v1
name: cluster-passphrases
layeringDefinition:
abstract: false
layer: global
storagePolicy: cleartext
data:
passphrases:
- description: 'default profile'
document_name: default_passphrase
encrypted: true
profile: default
- description: 'alphanumeric profile'
document_name: alphanumeric_passphrase
encrypted: true
profile: alphanumeric
- description: 'alphanumeric_lower profile'
document_name: alphanumeric_lower_passphrase
encrypted: true
profile: alphanumeric_lower
- description: 'alphanumeric_upper profile'
document_name: alphanumeric_upper_passphrase
encrypted: true
profile: alphanumeric_upper
- description: 'all profile'
document_name: all_passphrase
encrypted: true
profile: all
- description: 'hex_lower profile'
document_name: hex_lower_passphrase
encrypted: true
profile: hex_lower
- description: 'hex_upper profile'
document_name: hex_upper_passphrase
encrypted: true
profile: hex_upper
...
""")
TEST_REPOSITORIES = {
'repositories': {
'global': {
'revision': '843d1a50106e1f17f3f722e2ef1634ae442fe68f',
'url': 'ssh://REPO_USERNAME@gerrit:29418/aic-clcp-manifests.git'
},
'secrets': {
'revision': 'master',
'url': (
'ssh://REPO_USERNAME@gerrit:29418/aic-clcp-security-'
'manifests.git')
}
}
}
TEST_SITE_DEFINITION = {
'data': {
'revision': 'v1.0',
'site_type': 'cicd',
},
'metadata': {
'layeringDefinition': {
'abstract': 'false',
'layer': 'site',
},
'name': 'test-site',
'schema': 'metadata/Document/v1',
'storagePolicy': 'cleartext',
},
'schema': 'pegleg/SiteDefinition/v1',
}
TEST_SITE_DOCUMENTS = [TEST_SITE_DEFINITION, TEST_PASSPHRASES_CATALOG]
TEST_GLOBAL_SITE_DOCUMENTS = [
TEST_SITE_DEFINITION, TEST_GLOBAL_PASSPHRASES_CATALOG
]
TEST_TYPE_SITE_DOCUMENTS = [TEST_SITE_DEFINITION, TEST_TYPES_CATALOG]
TEST_PROFILES_SITE_DOCUMENTS = [TEST_SITE_DEFINITION, TEST_PROFILES_CATALOG]
@mock.patch.object(
util.definition,
'documents_for_site',
autospec=True,
return_value=TEST_SITE_DOCUMENTS)
@mock.patch.object(
pegleg.config,
'get_site_repo',
autospec=True,
return_value='cicd_site_repo')
@mock.patch.object(
util.definition,
'site_files',
autospec=True,
return_value=[
'cicd_site_repo/site/cicd/passphrases/passphrase-catalog.yaml',
])
@mock.patch.dict(
os.environ, {
'PEGLEG_PASSPHRASE': 'ytrr89erARAiPE34692iwUMvWqqBvC',
'PEGLEG_SALT': 'MySecretSalt1234567890]['
})
def test_generate_passphrases(*_):
_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(_dir, 'cicd_site_repo'), exist_ok=True)
PassphraseGenerator('cicd', _dir, 'test_author').generate()
passphrase_dir = os.path.join(
_dir, 'site', 'cicd', 'secrets', 'passphrases')
assert 6 == len(os.listdir(passphrase_dir))
for passphrase in TEST_PASSPHRASES_CATALOG['data']['passphrases']:
passphrase_file_name = '{}.yaml'.format(passphrase['document_name'])
passphrase_file_path = os.path.join(
_dir, 'site', 'cicd', 'secrets', 'passphrases',
passphrase_file_name)
assert os.path.isfile(passphrase_file_path)
with open(passphrase_file_path) as stream:
doc = yaml.safe_load(stream)
assert doc['schema'] == 'pegleg/PeglegManagedDocument/v1'
assert doc['metadata']['storagePolicy'] == 'cleartext'
assert 'encrypted' in doc['data']
assert doc['data']['encrypted']['by'] == 'test_author'
assert 'generated' in doc['data']
assert doc['data']['generated']['by'] == 'test_author'
assert 'managedDocument' in doc['data']
assert doc['data']['managedDocument']['metadata'][
'storagePolicy'] == 'encrypted'
decrypted_passphrase = encryption.decrypt(
doc['data']['managedDocument']['data'],
os.environ['PEGLEG_PASSPHRASE'].encode(),
os.environ['PEGLEG_SALT'].encode())
if passphrase_file_name == 'osh_placement_password.yaml':
assert len(decrypted_passphrase) == 32
elif passphrase_file_name == 'osh_cinder_password.yaml':
assert len(decrypted_passphrase) == 25
else:
assert len(decrypted_passphrase) == 24
@log_capture()
def test_generate_passphrases_exception(capture):
unenc_data = uuid.uuid4().bytes
passphrase1 = uuid.uuid4().bytes
passphrase2 = uuid.uuid4().bytes
salt1 = uuid.uuid4().bytes
salt2 = uuid.uuid4().bytes
# Generate random data and encrypt it
enc_data = encryption.encrypt(unenc_data, passphrase1, salt1)
# Decrypt using the wrong key to trigger the InvalidToken error
with pytest.raises(fernet.InvalidToken):
encryption.decrypt(enc_data, passphrase2, salt2)
capture.check(
(
'pegleg.engine.util.encryption', 'ERROR', (
'Signature verification to decrypt secrets failed. '
'Please check your provided passphrase and salt and '
'try again.')))
@mock.patch.object(
util.definition,
'documents_for_site',
autospec=True,
return_value=TEST_SITE_DOCUMENTS)
@mock.patch.object(
pegleg.config,
'get_site_repo',
autospec=True,
return_value='cicd_site_repo')
@mock.patch.object(
util.definition,
'site_files',
autospec=True,
return_value=[
'cicd_site_repo/site/cicd/passphrases/passphrase-catalog.yaml',
])
@mock.patch.dict(
os.environ, {
'PEGLEG_PASSPHRASE': 'ytrr89erARAiPE34692iwUMvWqqBvC',
'PEGLEG_SALT': 'MySecretSalt1234567890]['
})
def test_generate_passphrases_with_overidden_passphrase_catalog(*_):
_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(_dir, 'cicd_site_repo'), exist_ok=True)
PassphraseGenerator(
'cicd', _dir, 'test_author',
[TEST_OVERRIDE_PASSPHRASES_CATALOG]).generate()
passphrase_dir = os.path.join(
_dir, 'site', 'cicd', 'secrets', 'passphrases')
assert 3 == len(os.listdir(passphrase_dir))
for passphrase in TEST_OVERRIDE_PASSPHRASES_CATALOG['data']['passphrases']:
passphrase_file_name = '{}.yaml'.format(passphrase['document_name'])
passphrase_file_path = os.path.join(
_dir, 'site', 'cicd', 'secrets', 'passphrases',
passphrase_file_name)
assert os.path.isfile(passphrase_file_path)
with open(passphrase_file_path) as stream:
doc = yaml.safe_load(stream)
assert doc['schema'] == 'pegleg/PeglegManagedDocument/v1'
assert doc['metadata']['storagePolicy'] == 'cleartext'
assert 'encrypted' in doc['data']
assert doc['data']['encrypted']['by'] == 'test_author'
assert 'generated' in doc['data']
assert doc['data']['generated']['by'] == 'test_author'
assert 'managedDocument' in doc['data']
assert doc['data']['managedDocument']['metadata'][
'storagePolicy'] == 'encrypted'
decrypted_passphrase = encryption.decrypt(
doc['data']['managedDocument']['data'],
os.environ['PEGLEG_PASSPHRASE'].encode(),
os.environ['PEGLEG_SALT'].encode())
if passphrase_file_name == 'osh_placement_password.yaml':
assert len(decrypted_passphrase) == 32
elif passphrase_file_name == 'osh_cinder_password.yaml':
assert len(decrypted_passphrase) == 25
else:
assert len(decrypted_passphrase) == 24
@mock.patch.object(
util.definition,
'documents_for_site',
autospec=True,
return_value=TEST_GLOBAL_SITE_DOCUMENTS)
@mock.patch.object(
pegleg.config,
'get_site_repo',
autospec=True,
return_value='cicd_site_repo')
@mock.patch.object(
util.definition,
'site_files',
autospec=True,
return_value=[
'cicd_global_repo/site/cicd/passphrases/passphrase-catalog.yaml',
])
@mock.patch.dict(
os.environ, {
'PEGLEG_PASSPHRASE': 'ytrr89erARAiPE34692iwUMvWqqBvC',
'PEGLEG_SALT': 'MySecretSalt1234567890]['
})
def test_global_passphrase_catalog(*_):
_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(_dir, 'cicd_site_repo'), exist_ok=True)
PassphraseGenerator('cicd', _dir, 'test_author').generate()
for passphrase in TEST_GLOBAL_PASSPHRASES_CATALOG['data']['passphrases']:
passphrase_file_name = '{}.yaml'.format(passphrase['document_name'])
passphrase_file_path = os.path.join(
_dir, 'site', 'cicd', 'secrets', 'passphrases',
passphrase_file_name)
assert os.path.isfile(passphrase_file_path)
with open(passphrase_file_path) as stream:
doc = yaml.safe_load(stream)
assert doc['schema'] == 'pegleg/PeglegManagedDocument/v1'
assert doc['metadata']['storagePolicy'] == 'cleartext'
assert 'encrypted' in doc['data']
assert doc['data']['encrypted']['by'] == 'test_author'
assert 'generated' in doc['data']
assert doc['data']['generated']['by'] == 'test_author'
assert 'managedDocument' in doc['data']
assert doc['data']['managedDocument']['metadata'][
'storagePolicy'] == 'encrypted'
decrypted_passphrase = encryption.decrypt(
doc['data']['managedDocument']['data'],
os.environ['PEGLEG_PASSPHRASE'].encode(),
os.environ['PEGLEG_SALT'].encode())
if passphrase_file_name == "passphrase_from_global.yaml":
assert len(decrypted_passphrase) == 24
@mock.patch.object(
util.definition,
'documents_for_site',
autospec=True,
return_value=TEST_TYPE_SITE_DOCUMENTS)
@mock.patch.object(
pegleg.config,
'get_site_repo',
autospec=True,
return_value='cicd_site_repo')
@mock.patch.object(
util.definition,
'site_files',
autospec=True,
return_value=[
'cicd_global_repo/site/cicd/passphrases/passphrase-catalog.yaml',
])
@mock.patch.dict(
os.environ, {
'PEGLEG_PASSPHRASE': 'ytrr89erARAiPE34692iwUMvWqqBvC',
'PEGLEG_SALT': 'MySecretSalt1234567890]['
})
def test_uuid_passphrase_catalog(*_):
_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(_dir, 'cicd_site_repo'), exist_ok=True)
PassphraseGenerator('cicd', _dir, 'test_author').generate()
for passphrase in TEST_TYPES_CATALOG['data']['passphrases']:
passphrase_file_name = '{}.yaml'.format(passphrase['document_name'])
passphrase_file_path = os.path.join(
_dir, 'site', 'cicd', 'secrets', 'passphrases',
passphrase_file_name)
assert os.path.isfile(passphrase_file_path)
with open(passphrase_file_path) as stream:
doc = yaml.safe_load(stream)
decrypted_passphrase = encryption.decrypt(
doc['data']['managedDocument']['data'],
os.environ['PEGLEG_PASSPHRASE'].encode(),
os.environ['PEGLEG_SALT'].encode())
if passphrase_file_name == "uuid_passphrase_doc.yaml":
assert uuid.UUID(decrypted_passphrase.decode()).version == 4
@mock.patch.object(
util.definition,
'documents_for_site',
autospec=True,
return_value=TEST_PROFILES_SITE_DOCUMENTS)
@mock.patch.object(
pegleg.config,
'get_site_repo',
autospec=True,
return_value='cicd_site_repo')
@mock.patch.object(
util.definition,
'site_files',
autospec=True,
return_value=[
'cicd_global_repo/site/cicd/passphrases/passphrase-catalog.yaml',
])
@mock.patch.dict(
os.environ, {
'PEGLEG_PASSPHRASE': 'ytrr89erARAiPE34692iwUMvWqqBvC',
'PEGLEG_SALT': 'MySecretSalt1234567890]['
})
def test_profiles_catalog(*_):
_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(_dir, 'cicd_site_repo'), exist_ok=True)
PassphraseGenerator('cicd', _dir, 'test_author').generate()
s_util = CryptoString()
for passphrase in TEST_PROFILES_CATALOG['data']['passphrases']:
passphrase_file_name = '{}.yaml'.format(passphrase['document_name'])
passphrase_file_path = os.path.join(
_dir, 'site', 'cicd', 'secrets', 'passphrases',
passphrase_file_name)
assert os.path.isfile(passphrase_file_path)
with open(passphrase_file_path) as stream:
doc = yaml.safe_load(stream)
decrypted_passphrase = encryption.decrypt(
doc['data']['managedDocument']['data'],
os.environ['PEGLEG_PASSPHRASE'].encode(),
os.environ['PEGLEG_SALT'].encode()).decode()
assert len(decrypted_passphrase) == 24
if passphrase_file_name == "default_passphrase.yaml":
assert s_util.has_lower(decrypted_passphrase) is True
assert s_util.has_upper(decrypted_passphrase) is True
assert s_util.has_number(decrypted_passphrase) is True
assert s_util.has_symbol(decrypted_passphrase) is True
bad_symbols = any(
char in '!"$%()*,./:;<>[]^_`{|}~\''
for char in decrypted_passphrase)
assert not bad_symbols
elif passphrase_file_name == "alphanumeric_passphrase.yaml":
assert s_util.has_lower(decrypted_passphrase) is True
assert s_util.has_upper(decrypted_passphrase) is True
assert s_util.has_number(decrypted_passphrase) is True
assert s_util.has_symbol(decrypted_passphrase) is False
elif passphrase_file_name == "alphanumeric_lower_passphrase.yaml":
assert s_util.has_lower(decrypted_passphrase) is True
assert s_util.has_upper(decrypted_passphrase) is False
assert s_util.has_number(decrypted_passphrase) is True
assert s_util.has_symbol(decrypted_passphrase) is False
elif passphrase_file_name == "alphanumeric_upper_passphrase.yaml":
assert s_util.has_lower(decrypted_passphrase) is False
assert s_util.has_upper(decrypted_passphrase) is True
assert s_util.has_number(decrypted_passphrase) is True
assert s_util.has_symbol(decrypted_passphrase) is False
elif passphrase_file_name == "all_passphrase.yaml":
assert s_util.has_lower(decrypted_passphrase) is True
assert s_util.has_upper(decrypted_passphrase) is True
assert s_util.has_number(decrypted_passphrase) is True
assert s_util.has_symbol(decrypted_passphrase) is True
elif passphrase_file_name == "hex_lower_passphrase.yaml":
assert s_util.has_lower(decrypted_passphrase) is True
assert s_util.has_upper(decrypted_passphrase) is False
assert s_util.has_number(decrypted_passphrase) is True
assert s_util.has_symbol(decrypted_passphrase) is False
bad_letters = any(
char in 'ghijklmnopqrstuvwxyz'
for char in decrypted_passphrase)
assert not bad_letters
elif passphrase_file_name == "hex_upper_passphrase.yaml":
assert s_util.has_lower(decrypted_passphrase) is False
assert s_util.has_upper(decrypted_passphrase) is True
assert s_util.has_number(decrypted_passphrase) is True
assert s_util.has_symbol(decrypted_passphrase) is False
bad_letters = any(
char in 'GHIJKLMNOPQRSTUVWXYZ'
for char in decrypted_passphrase)
assert not bad_letters
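A minimal round-trip sketch of the encryption helper the tests above exercise; the passphrase and salt values are illustrative only.

def test_encrypt_decrypt_round_trip():
    data = b"some-secret-bytes"
    passphrase = b"ytrr89erARAiPE34692iwUMvWqqBvC"  # illustrative value
    salt = b"MySecretSalt1234567890]["  # illustrative value
    token = encryption.encrypt(data, passphrase, salt)
    assert encryption.decrypt(token, passphrase, salt) == data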
| 36.221034
| 79
| 0.660384
| 2,180
| 20,320
| 5.910092
| 0.11422
| 0.066361
| 0.023906
| 0.030425
| 0.774604
| 0.738746
| 0.72788
| 0.72788
| 0.719187
| 0.712356
| 0
| 0.012949
| 0.236073
| 20,320
| 560
| 80
| 36.285714
| 0.817046
| 0.034104
| 0
| 0.696721
| 0
| 0
| 0.212847
| 0.0779
| 0
| 0
| 0
| 0
| 0.193989
| 1
| 0.016393
| false
| 0.371585
| 0.038251
| 0
| 0.054645
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
569e98ffa96d39e4f1a104736538b50b8787756f
| 24
|
py
|
Python
|
src/nashpy/polytope/__init__.py
|
Fil/Nashpy
|
405abe23cb655a084ea4a767b97e03fa24c3d5d2
|
[
"MIT"
] | null | null | null |
src/nashpy/polytope/__init__.py
|
Fil/Nashpy
|
405abe23cb655a084ea4a767b97e03fa24c3d5d2
|
[
"MIT"
] | null | null | null |
src/nashpy/polytope/__init__.py
|
Fil/Nashpy
|
405abe23cb655a084ea4a767b97e03fa24c3d5d2
|
[
"MIT"
] | null | null | null |
from .polytope import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3b130451f1db73e57f7e89bc74920b31f11ce479
| 20
|
py
|
Python
|
python_ws/devel/lib/python2.7/dist-packages/turtlebot_arm_lzd/msg/__init__.py
|
13821339565/luo
|
da69d9bd76be18c17c93b33d41ba4f4020a42e50
|
[
"MIT"
] | 1
|
2022-03-11T03:31:15.000Z
|
2022-03-11T03:31:15.000Z
|
VMware_Ubuntu16.04_Dev/ros_ws/devel/lib/python2.7/dist-packages/xtark_line_follower/msg/__init__.py
|
bravetree/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | null | null | null |
VMware_Ubuntu16.04_Dev/ros_ws/devel/lib/python2.7/dist-packages/xtark_line_follower/msg/__init__.py
|
bravetree/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | null | null | null |
from ._pos import *
| 10
| 19
| 0.7
| 3
| 20
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3b2d097bd11a8febb8db6983ec5f6f04fc838c5c
| 53,186
|
py
|
Python
|
tests/tests_task_integration.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | 3
|
2021-03-08T19:14:38.000Z
|
2022-02-01T17:57:31.000Z
|
tests/tests_task_integration.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | null | null | null |
tests/tests_task_integration.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | null | null | null |
from io import BytesIO
import pytest
from init_db_tests import init_db
from PIL import Image
from django.contrib.auth.models import Permission
from django.test import TestCase
from maintenancemanagement.models import (
Field,
FieldGroup,
FieldObject,
File,
Task,
)
from maintenancemanagement.serializers import (
EquipmentTypeSerializer,
FileSerializer,
TaskListingSerializer,
TaskSerializer,
TeamSerializer,
)
from openCMMS import settings
from rest_framework.test import APIClient
from usersmanagement.models import Team, UserProfile
User = settings.AUTH_USER_MODEL
class TaskTests(TestCase):
@pytest.fixture(scope="class", autouse=True)
def init_database(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
init_db()
def set_up_perm(self):
"""
Set up a user with permissions
"""
permission = Permission.objects.get(codename='add_task')
permission2 = Permission.objects.get(codename='view_task')
permission3 = Permission.objects.get(codename='delete_task')
permission4 = Permission.objects.get(codename='change_task')
user = UserProfile.objects.create(username='tom')
user.set_password('truc')
user.first_name = 'Tom'
user.save()
user.user_permissions.add(permission)
user.user_permissions.add(permission2)
user.user_permissions.add(permission3)
user.user_permissions.add(permission4)
user.save()
return user
def set_up_without_perm(self):
"""
Set up a user without permissions
"""
user = UserProfile.objects.create(username='tomy')
user.set_password('truc')
user.first_name = 'Tom'
user.save()
return user
def temporary_file(self):
"""
Returns a new temporary image.
"""
file_obj = BytesIO()
image = Image.new('1', (60, 60), 1)
image.save(file_obj, 'png')
file_obj.seek(0)
return file_obj
def add_add_perm_file(self, user):
"""
Add add permission for file
"""
permission = Permission.objects.get(codename='add_file')
user.user_permissions.add(permission)
def test_US5_I1_tasklist_get_with_perm(self):
"""
Test if a user with perm receives the data.
Inputs:
user (UserProfile): a UserProfile we setup with all permissions on tasks.
serializer (TaskSerializer): a TaskSerializer containing all tasks of the database in a serialized state.
Expected Outputs:
We expect the response's data to be the same dict as the serializer's data.
"""
user = self.set_up_perm()
tasks = Task.objects.filter(is_template=False)
serializer = TaskSerializer(tasks, many=True)
client = APIClient()
client.force_authenticate(user=user)
response = client.get('/api/maintenancemanagement/tasks/', format='json')
self.assertEqual(response.data, serializer.data)
def test_US5_I1_tasklist_get_only_templates_with_perm(self):
"""
Test if a user with perm receives the data.
Inputs:
user (UserProfile): a UserProfile we setup with all permissions on tasks.
serializer (TaskListingSerializer): a TaskListingSerializer containing all task templates of the database in a serialized state.
Expected Outputs:
We expect the response's data to be the same dict as the serializer's data.
"""
user = self.set_up_perm()
tasks = Task.objects.filter(is_template=True)
serializer = TaskListingSerializer(tasks, many=True)
client = APIClient()
client.force_authenticate(user=user)
response = client.get('/api/maintenancemanagement/tasks/', {"template": "true"}, format='json')
self.assertEqual(response.data, serializer.data)
def test_US5_I1_tasklist_get_without_perm(self):
"""
Test if a user without perm doesn't receive the data.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_without_perm()
client = APIClient()
client.force_authenticate(user=user)
response = client.get('/api/maintenancemanagement/tasks/', format='json')
self.assertEqual(response.status_code, 401)
def test_US5_I2_tasklist_post_with_perm(self):
"""
Test if a user with perm can add a task.
Inputs:
user (UserProfile): a UserProfile we setup with all permissions on tasks.
data (dict): {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu'
}
Expected Outputs:
We expect the response's status_code to be 201.
We expect to find the data we sent in the response data.
"""
user = self.set_up_perm()
data = {'name': 'verifier pneus', 'description': 'faut verfier les pneus de la voiture ta vu'}
client = APIClient()
client.force_authenticate(user=user)
response = client.post('/api/maintenancemanagement/tasks/', data, format='json')
self.assertEqual(response.status_code, 201)
self.assertLessEqual(data.items(), response.data.items())
def test_US5_I2_tasklist_post_without_perm(self):
"""
Test if a user without perm can't add a task.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_without_perm()
client = APIClient()
client.force_authenticate(user=user)
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu'
},
format='json'
)
self.assertEqual(response.status_code, 401)
def test_US5_I3_taskdetail_get_with_perm(self):
"""
Test if a user with perm can see a task detail.
Inputs:
user (UserProfile): a UserProfile we setup with all permissions on tasks.
data (dict): {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu'
}
Expected Outputs:
We expect to find the data we sent in the GET response's data.
"""
user = self.set_up_perm()
data = {'name': 'verifier pneus', 'description': 'faut verfier les pneus de la voiture ta vu'}
client = APIClient()
client.force_authenticate(user=user)
response1 = client.post('/api/maintenancemanagement/tasks/', data, format='json')
pk = response1.data['id']
response = client.get(f'/api/maintenancemanagement/tasks/{pk}/')
self.assertLessEqual(data.items(), response.data.items())
def test_US5_I3_taskdetail_get_non_existing_task_with_perm(self):
"""
Test if a user with perm can't see an unavailable task detail.
Inputs:
user (UserProfile): a UserProfile we setup with all permissions on tasks.
10506466 (int): a number we know can't be a task id in our tests
Expected Output:
We expect the response's status_code to be 404.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
response = client.get('/api/maintenancemanagement/tasks/10506466/')
self.assertEqual(response.status_code, 404)
def test_US5_I3_taskdetail_get_without_perm(self):
"""
Test if a user without perm can't see a task detail.
Inputs:
user (UserProfile): a UserProfile we setup with all permissions on tasks then no permissions.
Expected Outputs:
We expect the GET response's status_code to be 401.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
response1 = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu'
},
format='json'
)
pk = response1.data['id']
user.user_permissions.clear()
user = UserProfile.objects.get(id=user.pk)
client.force_authenticate(user=user)
response = client.get(f'/api/maintenancemanagement/tasks/{pk}/')
self.assertEqual(response.status_code, 401)
def test_US5_I4_taskdetail_put_with_perm(self):
"""
Test if a user with perm can change a task.
Inputs:
user (UserProfile): a UserProfile we setup with all permissions on tasks.
data_post (dict): {'name': 'verifier pneus', 'description': 'faut verfier les pneus de la voiture ta vu'}
data_put (dict): {'name': 'verifier roues'}
Expected Outputs:
We expect to find the same id in the data of response_post and response_put.
We expect the PUT response's data to contain the same value for 'name' as the put data.
We expect the PUT response's status_code to be 200.
We expect the GET response's data to contain the same value for 'name' as the put data.
"""
user = self.set_up_perm()
data_post = {'name': 'verifier pneus', 'description': 'faut verfier les pneus de la voiture ta vu'}
data_put = {'name': 'verifier roues'}
client = APIClient()
client.force_authenticate(user=user)
response_post = client.post('/api/maintenancemanagement/tasks/', data_post, format='json')
pk = response_post.data['id']
response_put = client.put(f'/api/maintenancemanagement/tasks/{pk}/', data_put, format='json')
self.assertEqual(response_put.data['id'], response_post.data['id'])
self.assertEqual(response_put.data['name'], data_put['name'])
self.assertEqual(response_put.status_code, 200)
response_get = client.get(f'/api/maintenancemanagement/tasks/{pk}/')
self.assertEqual(response_get.data['name'], data_put['name'])
def test_US5_I4_taskdetail_put_non_existing_task_with_perm(self):
"""
Test if a user with perm can't change an unavailable task.
Inputs:
user (UserProfile): a UserProfile we setup with all permissions on tasks.
Expected Outputs:
We expect the PUT response's status_code to be 404.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu'
},
format='json'
)
response = client.put('/api/maintenancemanagement/tasks/644687456/', {'name': 'verifier roues'}, format='json')
self.assertEqual(response.status_code, 404)
def test_US5_I4_taskdetail_put_without_perm(self):
"""
Test if a user without perm can't change a task.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
response1 = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu'
},
format='json'
)
pk = response1.data['id']
user.user_permissions.clear()
user = UserProfile.objects.get(id=user.pk)
client.force_authenticate(user=user)
response = client.put(f'/api/maintenancemanagement/tasks/{pk}/', {'name': 'verifier roues'}, format='json')
self.assertEqual(response.status_code, 401)
def test_US5_I4_taskdetail_put_with_end_condition_with_perm(self):
"""
Test if a user with perm can change a task with end conditions.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="End Conditions"))
checkbox = conditions.get(name="Checkbox")
entier = conditions.get(name="Integer")
response1 = client.post(
'/api/maintenancemanagement/tasks/', {
'name':
'verifier pneus',
'description':
'faut verfier les pneus de la voiture ta vu',
'end_conditions':
[
{
"field": checkbox.id,
"value": "false",
"description": "test_update_task_with_perm_with_end_conditions_1"
},
{
"field": entier.id,
"value": 0,
"description": "test_update_task_with_perm_with_end_conditions_2"
},
]
},
format='json'
)
pk = response1.data['id']
response = client.put(
'/api/maintenancemanagement/tasks/' + str(pk) + '/', {
'name':
'verifier roues',
'duration':
'30d',
'end_conditions':
[
{
"id": FieldObject.objects.get(field=checkbox).id,
"value": "false",
"description": "maj_checkbox"
},
{
"id": FieldObject.objects.get(field=entier).id,
"value": 10,
"description": "maj_entier"
},
]
},
format='json'
)
self.assertEqual(response1.status_code, 201)  # guard: fail here if the setup POST failed, so it isn't mistaken for a PUT failure.
self.assertEqual(response.data['name'], 'verifier roues')
self.assertEqual(response.status_code, 200)
def test_US5_I5_taskdetail_delete_with_perm(self):
"""
Test if a user with perm can delete a task
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
response1 = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu'
},
format='json'
)
pk = response1.data['id']
response = client.delete('/api/maintenancemanagement/tasks/' + str(pk) + '/')
self.assertEqual(response.status_code, 204)
def test_US5_I5_taskdetail_delete_non_existing_task_with_perm(self):
"""
Test if a user with perm can't delete an unavailable task
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
response = client.delete('/api/maintenancemanagement/tasks/' + str(6546546) + '/')
self.assertEqual(response.status_code, 404)
def test_US5_I5_taskdetail_delete_without_perm(self):
"""
Test if a user without perm can't delete a task.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
response1 = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu'
},
format='json'
)
pk = response1.data['id']
user.user_permissions.clear()
user = UserProfile.objects.get(id=user.pk)
client.force_authenticate(user=user)
response = client.delete('/api/maintenancemanagement/tasks/' + str(pk) + '/')
self.assertEqual(response.status_code, 401)
def test_US6_I1_addteamtotask_post_with_perm(self):
"""
Test if a user with permission can add a team to a task.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
team = Team.objects.create(name="team")
task = Task.objects.create(name="tache")
response = client.post(
'/api/maintenancemanagement/addteamtotask', {
"id_team": f"{team.pk}",
"id_task": f"{task.pk}"
},
format="json"
)
self.assertEqual(response.status_code, 201)
def test_US6_I1_addteamtotask_post_without_perm(self):
"""
Test if a user without permission can't add a team to a task.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_perm()
user.user_permissions.clear()
client = APIClient()
client.force_authenticate(user=user)
team = Team.objects.create(name="team")
task = Task.objects.create(name="tache")
response = client.post(
'/api/maintenancemanagement/addteamtotask', {
"id_team": f"{team.pk}",
"id_task": f"{task.pk}"
},
format="json"
)
self.assertEqual(response.status_code, 401)
def test_US6_I1_addteamtotask_put_with_perm(self):
"""
Test if a user with permission can remove a team from a task.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
team = Team.objects.create(name="team")
task = Task.objects.create(name="tache")
response = client.put(
'/api/maintenancemanagement/addteamtotask', {
"id_team": f"{team.pk}",
"id_task": f"{task.pk}"
},
format="json"
)
self.assertEqual(response.status_code, 201)
def test_US6_I1_addteamtotask_put_without_perm(self):
"""
Test if a user without permission can't remove a team from a task.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_without_perm()
client = APIClient()
client.force_authenticate(user=user)
team = Team.objects.create(name="team")
task = Task.objects.create(name="tache")
response = client.put(
'/api/maintenancemanagement/addteamtotask', {
"id_team": f"{team.pk}",
"id_task": f"{task.pk}"
},
format="json"
)
self.assertEqual(response.status_code, 401)
def test_US6_I3_teamtaskslist_get_with_perm(self):
"""
Test if a user with permission can view a team's tasks.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
team = Team.objects.create(name="team")
task = Task.objects.create(name="task")
task.teams.add(team)
task.save()
tasks = team.task_set.all()
serializer = TaskSerializer(tasks, many=True)
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
response = client.get("/api/maintenancemanagement/teamtasklist/" + str(team.pk), format='json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, serializer.data)
def test_US6_I3_teamtaskslist_get_non_existing_team_with_perm(self):
"""
Test if a user with permission can't view a non-existing team's tasks.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
response = client.get("/api/maintenancemanagement/teamtasklist/" + str(65465464), format='json')
self.assertEqual(response.status_code, 404)
def test_US6_I3_teamtaskslist_get_without_perm(self):
"""
Test if a user without permission can't view a team's tasks.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_without_perm()
client = APIClient()
client.force_authenticate(user=user)
team = Team.objects.create(name="team")
task = Task.objects.create(name="task")
response = client.get(f'/api/maintenancemanagement/teamtasklist/{team.pk}', format='json')
self.assertEqual(response.status_code, 401)
def test_US6_I4_usertaskslist_get_with_perm(self):
"""
Tests if a user with permission can access a user's task list.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
team = Team.objects.create(name="team")
team2 = Team.objects.create(name="team2")
task = Task.objects.create(name="task")
task.teams.add(team)
task.save()
user = self.set_up_perm()
team.user_set.add(user)
team.save()
team2.user_set.add(user)
team2.save()
tasks = team.task_set.all()
tasks2 = team2.task_set.all()
serializer = TaskListingSerializer(tasks, many=True)
serializer2 = TaskListingSerializer(tasks2, many=True)
client = APIClient()
client.force_authenticate(user=user)
response = client.get(f"/api/maintenancemanagement/usertasklist/{user.pk}", format='json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, serializer.data + serializer2.data)
def test_US6_I4_usertaskslist_get_non_existing_user_with_perm(self):
"""
Tests if a user with permission can't access a non-existing user's task list.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
response = client.get(f"/api/maintenancemanagement/usertasklist/{6546874}", format='json')
self.assertEqual(response.status_code, 404)
def test_US6_I4_usertaskslist_get_without_perm(self):
"""
Test if a user without permissions can't see another user's tasks.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
temp_user = self.set_up_perm()
user = self.set_up_without_perm()
client = APIClient()
client.force_authenticate(user=user)
team = Team.objects.create(name="team")
team.user_set.add(user)
team.save()
response = client.get(f'/api/maintenancemanagement/usertasklist/{temp_user.pk}', format='json')
self.assertEqual(response.status_code, 401)
def test_US8_I1_tasklist_post_with_file_with_perm(self):
"""
Test if a user with perm can add a task with a file
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
self.add_add_perm_file(user)
client = APIClient()
client.force_authenticate(user=user)
data = {'file': self.temporary_file(), 'is_manual': 'False'}
response1 = client.post("/api/maintenancemanagement/files/", data, format='multipart')
pk = response1.data['id']
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu',
'files': [pk]
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['files'], [pk])
def test_US8_I1_tasklist_post_with_file_without_perm(self):
"""
Test if a user without perm can't add a task with a file.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_without_perm()
self.add_add_perm_file(user)
client = APIClient()
client.force_authenticate(user=user)
data = {'file': self.temporary_file(), 'is_manual': 'False'}
response1 = client.post("/api/maintenancemanagement/files/", data, format='multipart')
pk = response1.data['id']
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu',
'files': [pk]
},
format='json'
)
self.assertEqual(response.status_code, 401)
def test_US8_I2_taskdetail_get_with_file_with_perm(self):
"""
Test if a user with perm can see a task detail with a file
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
self.add_add_perm_file(user)
client = APIClient()
client.force_authenticate(user=user)
data = {'file': self.temporary_file(), 'is_manual': 'False'}
response1 = client.post("/api/maintenancemanagement/files/", data, format='multipart')
pk_file = response1.data['id']
response1 = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu',
'files': [pk_file]
},
format='json'
)
pk = response1.data['id']
response = client.get('/api/maintenancemanagement/tasks/' + str(pk) + '/')
files = File.objects.filter(pk=pk_file)
self.assertEqual(response.data['files'], FileSerializer(files, many=True).data)
def test_US8_I2_taskdetail_get_with_file_without_perm(self):
"""
Test if a user without perm can't see a task detail with a file.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_perm()
self.add_add_perm_file(user)
client = APIClient()
client.force_authenticate(user=user)
data = {'file': self.temporary_file(), 'is_manual': 'False'}
response1 = client.post("/api/maintenancemanagement/files/", data, format='multipart')
pk_file = response1.data['id']
response1 = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu',
'files': [pk_file]
},
format='json'
)
pk = response1.data['id']
user.user_permissions.clear()
user = UserProfile.objects.get(id=user.pk)
client.force_authenticate(user=user)
response = client.get('/api/maintenancemanagement/tasks/' + str(pk) + '/')
self.assertEqual(response.status_code, 401)
def test_US8_I1_tasklist_post_with_files_with_perm(self):
"""
Test if a user with perm can add a task with multiple files
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
self.add_add_perm_file(user)
client = APIClient()
client.force_authenticate(user=user)
data = {'file': self.temporary_file(), 'is_manual': 'False'}
data2 = {'file': self.temporary_file(), 'is_manual': 'False'}
response1 = client.post("/api/maintenancemanagement/files/", data, format='multipart')
response2 = client.post("/api/maintenancemanagement/files/", data2, format='multipart')
pk_1 = response1.data['id']
pk_2 = response2.data['id']
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu',
'files': [pk_1, pk_2]
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['files'], [pk_1, pk_2])
def test_US8_I1_tasklist_post_with_files_without_perm(self):
"""
Test if a user without perm can't add a task with multiple files.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_without_perm()
self.add_add_perm_file(user)
client = APIClient()
client.force_authenticate(user=user)
data = {'file': self.temporary_file(), 'is_manual': 'False'}
data2 = {'file': self.temporary_file(), 'is_manual': 'False'}
response1 = client.post("/api/maintenancemanagement/files/", data, format='multipart')
response2 = client.post("/api/maintenancemanagement/files/", data2, format='multipart')
pk_1 = response1.data['id']
pk_2 = response2.data['id']
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu',
'files': [pk_1, pk_2]
},
format='json'
)
self.assertEqual(response.status_code, 401)
def test_US8_I2_taskdetail_get_with_files_with_perm(self):
"""
Test if a user with perm can see a task detail with multiple files
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
self.add_add_perm_file(user)
client = APIClient()
client.force_authenticate(user=user)
data = {'file': self.temporary_file(), 'is_manual': 'False'}
data2 = {'file': self.temporary_file(), 'is_manual': 'False'}
response1 = client.post("/api/maintenancemanagement/files/", data, format='multipart')
response2 = client.post("/api/maintenancemanagement/files/", data2, format='multipart')
pk_1 = response1.data['id']
pk_2 = response2.data['id']
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu',
'files': [pk_1, pk_2]
},
format='json'
)
pk = response.data['id']
response = client.get('/api/maintenancemanagement/tasks/' + str(pk) + '/')
files = File.objects.filter(pk__in=[pk_1, pk_2])
self.assertEqual(response.data['files'], FileSerializer(files, many=True).data)
def test_US8_I2_taskdetail_get_with_files_without_perm(self):
"""
Test if a user without perm can't see a task detail with multiple files.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_perm()
self.add_add_perm_file(user)
client = APIClient()
client.force_authenticate(user=user)
data = {'file': self.temporary_file(), 'is_manual': 'False'}
data2 = {'file': self.temporary_file(), 'is_manual': 'False'}
response1 = client.post("/api/maintenancemanagement/files/", data, format='multipart')
response2 = client.post("/api/maintenancemanagement/files/", data2, format='multipart')
pk_1 = response1.data['id']
pk_2 = response2.data['id']
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'faut verfier les pneus de la voiture ta vu',
'files': [pk_1, pk_2]
},
format='json'
)
pk = response.data['id']
user.user_permissions.clear()
user = UserProfile.objects.get(id=user.pk)
client.force_authenticate(user=user)
response = client.get('/api/maintenancemanagement/tasks/' + str(pk) + '/')
self.assertEqual(response.status_code, 401)
def test_US9_I1_tasklist_post_with_trigger_conditions_with_perm(self):
"""
Test if a user with perm can add a task with trigger_conditions
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="Trigger Conditions"))
field_object = FieldObject.objects.get(field=Field.objects.get(name="Nb bouteilles"))
nb_bouteilles_value = float(field_object.value)
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name':
'verifier pneus',
'description':
'desc_task_test_add_task_with_perm_with_trigger_conditions',
'trigger_conditions':
[
{
"field": conditions.get(name="Recurrence").id,
"value": "30d",
"delay": "7d",
"description": "test_add_task_with_perm_with_trigger_conditions_recurrence"
}, {
'field': conditions.get(name='Above Threshold').id,
'value': '0.6',
'field_object_id': field_object.id,
'delay': '2d',
'description': 'test_add_task_with_perm_with_trigger_conditions_above_threshold'
}, {
'field': conditions.get(name='Under Threshold').id,
'value': '0.6',
'field_object_id': field_object.id,
'delay': '2d',
'description': 'test_add_task_with_perm_with_trigger_conditions_under_threshold'
}, {
'field': conditions.get(name='Frequency').id,
'value': '10000',
'field_object_id': field_object.id,
'delay': '2d',
'description': 'test_add_task_with_perm_with_trigger_conditions_frequency'
}
]
},
format='json'
)
self.assertEqual(response.status_code, 201)
task = Task.objects.get(description="desc_task_test_add_task_with_perm_with_trigger_conditions")
self.assertFalse(task.is_triggered)
field_object1 = FieldObject.objects.get(
description="test_add_task_with_perm_with_trigger_conditions_recurrence"
)
field_object2 = FieldObject.objects.get(
description="test_add_task_with_perm_with_trigger_conditions_above_threshold"
)
field_object3 = FieldObject.objects.get(
description="test_add_task_with_perm_with_trigger_conditions_under_threshold"
)
field_object4 = FieldObject.objects.get(
description="test_add_task_with_perm_with_trigger_conditions_frequency"
)
self.assertEqual(field_object1.described_object, task)
self.assertEqual(field_object1.value, '30d|7d')
self.assertEqual(field_object2.value, f'0.6|{field_object.id}|2d')
self.assertEqual(field_object3.value, f'0.6|{field_object.id}|2d')
self.assertEqual(field_object4.value, f'10000|{field_object.id}|2d|{nb_bouteilles_value + 10000}')
def test_US9_I1_tasklist_post_with_trigger_conditions_with_perm_and_wrong_values_1(self):
"""
Test if a user with perm can't add a task with trigger_conditions with bad values.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="Trigger Conditions"))
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name':
'verifier pneus',
'description':
'desc_task_test_add_task_with_perm_with_trigger_conditions',
'trigger_conditions':
[
{
"field": conditions.get(name="Recurrence").id,
"value": "30d",
"description": "test_add_task_with_perm_with_trigger_conditions_recurrence"
}
]
},
format='json'
)
self.assertEqual(response.status_code, 400)
def test_US9_I1_tasklist_post_with_trigger_conditions_with_perm_and_wrong_values_2(self):
"""
Test if a user with perm can't add a task with trigger_conditions with bad values.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="Trigger Conditions"))
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name':
'verifier pneus',
'description':
'desc_task_test_add_task_with_perm_with_trigger_conditions',
'trigger_conditions':
[
{
"field": conditions.get(name="Recurrence").id,
"value": "30d",
'delay': '2d',
"description": "test_add_task_with_perm_with_trigger_conditions_recurrence",
'field_object_id': 1,
}
]
},
format='json'
)
self.assertEqual(response.status_code, 400)
def test_US9_I1_tasklist_post_with_trigger_conditions_with_perm_and_wrong_values_3(self):
"""
Test if a user with perm can't add a task with trigger_conditions with bad values.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="Trigger Conditions"))
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name':
'verifier pneus',
'description':
'desc_task_test_add_task_with_perm_with_trigger_conditions',
'trigger_conditions':
[
{
'field': conditions.get(name='Above Threshold').id,
'value': '0.6',
'field_object_id': 1,
'description': 'test_add_task_with_perm_with_trigger_conditions_above_threshold'
}
]
},
format='json'
)
self.assertEqual(response.status_code, 400)
def test_US9_I1_tasklist_post_with_trigger_conditions_with_perm_and_wrong_values_4(self):
"""
Test if a user with perm can't add a task with trigger_conditions with bad values.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="Trigger Conditions"))
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name':
'verifier pneus',
'description':
'desc_task_test_add_task_with_perm_with_trigger_conditions',
'trigger_conditions':
[
{
'field': conditions.get(name='Above Threshold').id,
'value': '0.6',
'delay': '2d',
'description': 'test_add_task_with_perm_with_trigger_conditions_above_threshold'
}
]
},
format='json'
)
self.assertEqual(response.status_code, 400)
def test_US11_I1_tasklist_post_with_end_conditions_with_perm(self):
"""
Test if a user with perm can add a task with end_conditions
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="End Conditions"))
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name':
'verifier pneus',
'description':
'desc_task_test_add_task_with_perm_with_end_conditions',
'end_conditions':
[
{
"field": conditions.get(name="Checkbox").id,
"value": "false",
"description": "test_add_task_with_perm_with_end_conditions_1"
},
{
"field": conditions.get(name="Integer").id,
"value": 0,
"description": "test_add_task_with_perm_with_end_conditions_2"
},
]
},
format='json'
)
self.assertEqual(response.status_code, 201)
task = Task.objects.get(description="desc_task_test_add_task_with_perm_with_end_conditions")
self.assertTrue(task.is_triggered)
field_object_1 = FieldObject.objects.get(description="test_add_task_with_perm_with_end_conditions_1")
field_object_2 = FieldObject.objects.get(description="test_add_task_with_perm_with_end_conditions_2")
self.assertEqual(field_object_1.described_object, task)
self.assertEqual(field_object_2.described_object, task)
def test_US10_11_I1_tasklist_post_with_end_and_trigger_conditions_with_perm(self):
"""
Test if a user with perm can add a task with trigger_condition and end_condition
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
trigger_conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="Trigger Conditions"))
end_conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="End Conditions"))
response = client.post(
'/api/maintenancemanagement/tasks/',
{
'name':
'verifier pneus',
'description':
'desc_task_test_add_task_with_perm_with_trigger_and_end__conditions',
'trigger_conditions':
[
{
"field": trigger_conditions.get(name="Recurrence").id,
# "field_object_id": 2, # Si on le met pas ça dit qu'il est requis, si on le mets ça plante
"value": "30d",
"delay": "14d",
"description": "test_add_task_with_perm_with_trigger_and_end_conditions_1"
}
],
'end_conditions':
[
{
"field": end_conditions.get(name="Checkbox").id,
"value": "false",
"description": "test_add_task_with_perm_with_trigger_and_end_conditions_2"
}
]
},
format='json'
)
self.assertEqual(response.status_code, 201)
task = Task.objects.get(description="desc_task_test_add_task_with_perm_with_trigger_and_end__conditions")
field_object_1 = FieldObject.objects.get(
description="test_add_task_with_perm_with_trigger_and_end_conditions_1"
)
field_object_2 = FieldObject.objects.get(
description="test_add_task_with_perm_with_trigger_and_end_conditions_2"
)
self.assertEqual(field_object_1.described_object, task)
self.assertEqual(field_object_2.described_object, task)
def test_US10_11_I1_tasklist_post_with_conditions_with_bad_values_with_perm(self):
"""
Test if a user with perm can't add a task with conditions with bad values.
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="Trigger Conditions"))
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name':
'verifier pneus',
'description':
'desc_task_test_add_task_with_perm_with_conditions_with_bad_values',
'trigger_conditions':
[
{
"field": conditions.get(name="Recurrence").id,
"value": "BAD_VALUE",
"description": "test_add_task_with_perm_with_conditions_with_bad_values"
}
]
},
format='json'
)
self.assertEqual(response.status_code, 400)
def test_US19_I1_taskrequirements_with_perm(self):
"""
Test if a user can get template requirements with permission
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
response = client.get('/api/maintenancemanagement/tasks/requirements')
trigger_conditions = response.data['trigger_conditions']
end_conditions = response.data['end_conditions']
template = Task.objects.get(name='TemplateTest')
template_json = {
'id': template.id,
'name': template.name,
'end_date': template.end_date,
'description': template.description,
'duration': '2d',
'is_template': template.is_template,
'equipment': None,
'files': FileSerializer(template.files.all(), many=True).data,
'teams': TeamSerializer(template.teams.all(), many=True).data,
'equipment_type': EquipmentTypeSerializer(template.equipment_type).data,
'over': template.over,
'trigger_conditions': [],
'end_conditions': []
}
self.assertEqual(response.status_code, 200)
self.assertEqual(
len(trigger_conditions),
len(Field.objects.filter(field_group=FieldGroup.objects.get(name="Trigger Conditions")))
)
self.assertEqual(
len(end_conditions), len(Field.objects.filter(field_group=FieldGroup.objects.get(name="End Conditions")))
)
self.assertIn(template_json, response.json().get('task_templates'))
def test_US19_I1_taskrequirements_without_perm(self):
"""
Test if a user can get template requirements without permission.
Inputs:
user (UserProfile): a UserProfile we setup with no permissions on tasks.
Expected Outputs:
We expect the response's status_code to be 401.
"""
user = self.set_up_without_perm()
client = APIClient()
client.force_authenticate(user=user)
response = client.get('/api/maintenancemanagement/tasks/requirements')
self.assertEqual(response.status_code, 401)
def test_US11_I2_tasklist_post_with_no_end_condition(self):
"""
Test that a checkbox is created if no end_conditions are given
Inputs:
user (UserProfile): a user with all permissions on tasks.
"""
user = self.set_up_perm()
client = APIClient()
client.force_authenticate(user=user)
conditions = Field.objects.filter(field_group=FieldGroup.objects.get(name="End Conditions"))
response = client.post(
'/api/maintenancemanagement/tasks/', {
'name': 'verifier pneus',
'description': 'desc_task_test_tasklist_post_with_no_end_condition',
'end_conditions': []
},
format='json'
)
self.assertEqual(response.status_code, 201)
task = Task.objects.get(description="desc_task_test_tasklist_post_with_no_end_condition")
check_box = FieldObject.objects.get(field=conditions.get(name="Checkbox"))
self.assertEqual(check_box.described_object, task)
| 41.325563
| 148
| 0.572256
| 5,679
| 53,186
| 5.156894
| 0.052122
| 0.023492
| 0.01598
| 0.045175
| 0.880011
| 0.853616
| 0.824353
| 0.815714
| 0.80588
| 0.789456
| 0
| 0.014128
| 0.330613
| 53,186
| 1,286
| 149
| 41.357698
| 0.808466
| 0.192663
| 0
| 0.611047
| 0
| 0
| 0.208466
| 0.112686
| 0
| 0
| 0
| 0
| 0.079402
| 1
| 0.056387
| false
| 0.002302
| 0.012658
| 0
| 0.073648
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3b2ea130fa6c7d83e5acc33a3c12edb00e53241c
| 70
|
py
|
Python
|
mvfy/visual/utils/__init__.py
|
erwingforerocastro/mvfy_visual_py
|
8740f21ffa68d0cfced0d0684251b2198488cb0e
|
[
"MIT"
] | null | null | null |
mvfy/visual/utils/__init__.py
|
erwingforerocastro/mvfy_visual_py
|
8740f21ffa68d0cfced0d0684251b2198488cb0e
|
[
"MIT"
] | null | null | null |
mvfy/visual/utils/__init__.py
|
erwingforerocastro/mvfy_visual_py
|
8740f21ffa68d0cfced0d0684251b2198488cb0e
|
[
"MIT"
] | null | null | null |
from detectors import *
from receivers import *
from streamer import *
| 23.333333
| 23
| 0.8
| 9
| 70
| 6.222222
| 0.555556
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157143
| 70
| 3
| 24
| 23.333333
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3b35fabd72bb76b475fce2a4e2ae1a7f2fecae94
| 39
|
py
|
Python
|
jinahub/encoders/text/CLIPTextEncoder/__init__.py
|
vivek2301/executors
|
8159681d68408ab8f797497bc3374be77e6ca392
|
[
"Apache-2.0"
] | null | null | null |
jinahub/encoders/text/CLIPTextEncoder/__init__.py
|
vivek2301/executors
|
8159681d68408ab8f797497bc3374be77e6ca392
|
[
"Apache-2.0"
] | null | null | null |
jinahub/encoders/text/CLIPTextEncoder/__init__.py
|
vivek2301/executors
|
8159681d68408ab8f797497bc3374be77e6ca392
|
[
"Apache-2.0"
] | null | null | null |
from .clip_text import CLIPTextEncoder
| 19.5
| 38
| 0.871795
| 5
| 39
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3b4bbeb4322a11827430b3126df4b6c3cc7e948a
| 35
|
py
|
Python
|
contrib/discodex/www/views.py
|
kostis/disco
|
200ca4afef9851139b122928e409d1d3186be646
|
[
"BSD-3-Clause"
] | 1
|
2016-08-23T06:45:18.000Z
|
2016-08-23T06:45:18.000Z
|
contrib/discodex/www/views.py
|
dimazest/disco
|
9175f863d6f83f2a918c851c9eed88019adf7f24
|
[
"BSD-3-Clause"
] | null | null | null |
contrib/discodex/www/views.py
|
dimazest/disco
|
9175f863d6f83f2a918c851c9eed88019adf7f24
|
[
"BSD-3-Clause"
] | null | null | null |
from discodex.views import Indices
| 17.5
| 34
| 0.857143
| 5
| 35
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8e6a5a50f56e1cd48e4364000b60218d0803e125
| 279
|
py
|
Python
|
cantools/database/can/__init__.py
|
juleq/cantools
|
c34fbf8423e684fdef7a6b34789b6f0607bfc581
|
[
"MIT"
] | 1
|
2021-05-15T16:24:51.000Z
|
2021-05-15T16:24:51.000Z
|
cantools/database/can/__init__.py
|
juleq/cantools
|
c34fbf8423e684fdef7a6b34789b6f0607bfc581
|
[
"MIT"
] | null | null | null |
cantools/database/can/__init__.py
|
juleq/cantools
|
c34fbf8423e684fdef7a6b34789b6f0607bfc581
|
[
"MIT"
] | null | null | null |
from .database import Database
from .message import Message
from .message import EncodeError
from .message import DecodeError
from .signal import Signal
from .node import Node
# ToDo: Remove backwards compatibility File in future release.
from .database import Database as File
| 27.9
| 62
| 0.820789
| 38
| 279
| 6.026316
| 0.447368
| 0.144105
| 0.222707
| 0.227074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143369
| 279
| 9
| 63
| 31
| 0.958159
| 0.215054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8e80a5e0cf21b917a09d9a653b8d781f955a0545
| 132
|
py
|
Python
|
spacetimeformer/data/__init__.py
|
Piki1989/spacetimeformer
|
7e0caf17dd03e5d25e2766c4f7132805779bcc40
|
[
"MIT"
] | 1
|
2022-01-18T07:08:41.000Z
|
2022-01-18T07:08:41.000Z
|
spacetimeformer/data/__init__.py
|
Piki1989/spacetimeformer
|
7e0caf17dd03e5d25e2766c4f7132805779bcc40
|
[
"MIT"
] | 1
|
2022-03-23T01:16:37.000Z
|
2022-03-23T02:20:39.000Z
|
spacetimeformer/data/__init__.py
|
Piki1989/spacetimeformer
|
7e0caf17dd03e5d25e2766c4f7132805779bcc40
|
[
"MIT"
] | null | null | null |
from . import timefeatures
from . import precip
from . import metr_la
from .datamodule import DataModule
from .csv_dataset import *
| 22
| 34
| 0.80303
| 18
| 132
| 5.777778
| 0.5
| 0.288462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 132
| 5
| 35
| 26.4
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|