hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eca6f50a5eadc5345a5e9f63605ec159a4988d31
| 5,371
|
py
|
Python
|
EmissionTestPredictionService.py
|
plpriyanka/Emission-Test-Prediction-service
|
5605e38b65b3cc1825e970cd0ac3675696da9462
|
[
"Apache-2.0"
] | null | null | null |
EmissionTestPredictionService.py
|
plpriyanka/Emission-Test-Prediction-service
|
5605e38b65b3cc1825e970cd0ac3675696da9462
|
[
"Apache-2.0"
] | null | null | null |
EmissionTestPredictionService.py
|
plpriyanka/Emission-Test-Prediction-service
|
5605e38b65b3cc1825e970cd0ac3675696da9462
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 7 20:33:50 2017
@author: priyanka
"""
from sklearn.externals import joblib as jl
import pandas as pd
from flask import Flask
from flask import request
app = Flask(__name__)  # WSGI application exposing the two demo prediction routes below
# Load the trained classifier once at import time. 'trained_model.pkl' must be
# present in the process working directory or import fails here.
# NOTE(review): sklearn.externals.joblib is removed in modern scikit-learn;
# presumably this targets an old sklearn version — confirm before upgrading.
clf = jl.load('trained_model.pkl')
def predict_emission_test(carInputData):
    """Classify one car's feature vector with the module-level model `clf`.

    carInputData is a mapping of column index -> single-row pandas Series;
    it is turned into a one-row DataFrame and scored. Returns a
    human-readable pass/fail message string.
    """
    frame = pd.DataFrame(carInputData)
    prediction = clf.predict(frame)
    passed = prediction[0] == 1
    if passed:
        return 'Your car condition is good. Enjoy :)'
    return 'Oops Your car condition is bad. It might fail in emissions test :('
@app.route('/check/forpasscar', methods=['GET'])
def predict_emission_test_for_pass_car():
    """Demo endpoint: score a hard-coded feature vector of a 'passing' car.

    Returns the human-readable message from predict_emission_test().

    NOTE(review): the original handler read nine query parameters
    (v_make, v_model_year, v_gvwr, v_obd_*) and never used them — the
    prediction is made entirely from the fixed vector below. Those dead
    reads were removed; restore them only if real input wiring is added.
    """
    # 40 pre-normalized feature values, keyed by column index 0..39.
    # Index 34 set to 1.0 — presumably a one-hot category flag; TODO confirm
    # against the training pipeline's column layout.
    feature_values = [
        0.6, 0.7, 0.250250393475, 0.0, 0.25, 0.4, 0.0, 0.4, 0.2, 0.2,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    ]
    # Same structure the original built by hand: {index: one-row Series}.
    carInputDataForPass = {
        i: pd.Series(value, index=[0]) for i, value in enumerate(feature_values)
    }
    return predict_emission_test(carInputDataForPass)
@app.route('/check/forfailcar', methods=['GET'])
def predict_emission_test_for_fail_car():
    """Demo endpoint: score a hard-coded feature vector of a 'failing' car.

    Returns the human-readable message from predict_emission_test().

    NOTE(review): the original handler read nine query parameters
    (v_make, v_model_year, v_gvwr, v_obd_*) and never used them — the
    prediction is made entirely from the fixed vector below. Those dead
    reads were removed; restore them only if real input wiring is added.
    """
    # 40 pre-normalized feature values, keyed by column index 0..39.
    # Index 11 set to 1.0 — presumably a one-hot category flag; TODO confirm
    # against the training pipeline's column layout.
    feature_values = [
        0.2, 0.1, 0.125053655745, 0.0, 0.25, 0.2, 0.0, 0.2, 0.2, 0.2,
        0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    ]
    # Same structure the original built by hand: {index: one-row Series}.
    carInputDataForFail = {
        i: pd.Series(value, index=[0]) for i, value in enumerate(feature_values)
    }
    return predict_emission_test(carInputDataForFail)
| 38.092199
| 83
| 0.522808
| 852
| 5,371
| 3.196009
| 0.150235
| 0.235035
| 0.257804
| 0.22769
| 0.795446
| 0.795446
| 0.765332
| 0.739625
| 0.739625
| 0.739625
| 0
| 0.109656
| 0.274995
| 5,371
| 141
| 84
| 38.092199
| 0.589625
| 0.031651
| 0
| 0.694215
| 0
| 0
| 0.068388
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024793
| false
| 0.033058
| 0.033058
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
eca891b3561487d3d7c2483ee739b2275e0687e5
| 121
|
py
|
Python
|
interact-so/hello_world.py
|
luisyanezblanco/python_certification
|
67ca7f8c2a5f891fbb7efc3e85bd240417b66838
|
[
"MIT"
] | null | null | null |
interact-so/hello_world.py
|
luisyanezblanco/python_certification
|
67ca7f8c2a5f891fbb7efc3e85bd240417b66838
|
[
"MIT"
] | null | null | null |
interact-so/hello_world.py
|
luisyanezblanco/python_certification
|
67ca7f8c2a5f891fbb7efc3e85bd240417b66838
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Module-level side effect: greeting is printed whenever the file is run or imported.
print("hola mundo")
def primera():
    """Print a fixed greeting line; side effect only, returns None."""
    print("hola como esta todo en la mierda de la mierda")
    # (removed a redundant `pass` — the function body is never empty)
| 17.285714
| 55
| 0.702479
| 21
| 121
| 4.047619
| 0.809524
| 0.211765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 0.165289
| 121
| 6
| 56
| 20.166667
| 0.831683
| 0.173554
| 0
| 0
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0
| 0
| 0.25
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
|
0
| 7
|
bf22dcb747291c183d1b754a1581ab47107b5bb0
| 8,838
|
py
|
Python
|
api/tests/services/test_steam_api_service.py
|
wen96/hotohete
|
03f0e37069875a4c3b0f3e4b4759805388c76c33
|
[
"MIT"
] | 2
|
2017-11-14T20:33:56.000Z
|
2018-02-18T19:48:18.000Z
|
api/tests/services/test_steam_api_service.py
|
wen96/hotohete
|
03f0e37069875a4c3b0f3e4b4759805388c76c33
|
[
"MIT"
] | 10
|
2017-06-18T11:35:26.000Z
|
2017-12-05T18:40:17.000Z
|
api/tests/services/test_steam_api_service.py
|
wen96/hotohete
|
03f0e37069875a4c3b0f3e4b4759805388c76c33
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import urllib2
import mock
from api.services.steam_api_service import SteamAPIService
class SteamApiServiceTestCase(TestCase):
    """Unit tests for SteamAPIService.

    All outward dependencies are mocked: the service's `_request_endpoint`
    and `api_key` members are patched per test, the cache module is patched
    for the api_key test, and urllib2.urlopen is patched for the low-level
    request tests — no real Steam API traffic occurs.
    """

    @mock.patch('api.services.steam_api_service.cache')
    def test_api_key_returned_when_exist(self, mock_cache):
        """api_key should be fetched from the cache under key 'steam_api_key'."""
        # Arrange
        service = SteamAPIService()
        mock_cache.get.return_value = 'holita'
        # Act
        result = service.api_key
        # Assert
        self.assertEqual(result, 'holita')
        self.assertEqual(mock_cache.get.call_count, 1)
        self.assertEqual(mock_cache.get.call_args, mock.call('steam_api_key'))

    @mock.patch.object(SteamAPIService, '_request_endpoint')
    @mock.patch.object(SteamAPIService, 'api_key', new_callable=mock.PropertyMock)
    def test_get_steam_id_from_nick_name_returns_none_if_user_not_found_in_reponse(
            self, mock_api_key, mock_request_endpoint):
        """None is returned when ResolveVanityURL has no 'steamid' in the response."""
        # Arrange
        mock_api_key.return_value = 'hummus'
        mock_request_endpoint.return_value = {'response': {'error': 'asdf'}}
        nickname = 'clapton'
        url = 'http://api.steampowered.com/ISteamUser/ResolveVanityURL/v0001/?key={}&vanityurl={}'.format(
            mock_api_key.return_value, nickname)
        service = SteamAPIService()
        # Act
        result = service.get_steam_id_from_nick_name(nickname)
        # Assert
        self.assertIsNone(result)
        self.assertEqual(mock_api_key.call_count, 1)
        self.assertEqual(mock_request_endpoint.call_count, 1)
        self.assertEqual(mock_request_endpoint.call_args, mock.call(url))

    @mock.patch.object(SteamAPIService, '_request_endpoint')
    @mock.patch.object(SteamAPIService, 'api_key', new_callable=mock.PropertyMock)
    def test_get_steam_id_from_nick_name_returns_steam_id(self, mock_api_key, mock_request_endpoint):
        """The steamid field of a successful ResolveVanityURL response is returned."""
        # Arrange
        mock_api_key.return_value = 'hummus'
        mock_request_endpoint.return_value = {'response': {'steamid': '12341234'}}
        nickname = 'clapton'
        url = 'http://api.steampowered.com/ISteamUser/ResolveVanityURL/v0001/?key={}&vanityurl={}'.format(
            mock_api_key.return_value, nickname)
        service = SteamAPIService()
        # Act
        result = service.get_steam_id_from_nick_name(nickname)
        self.assertEqual(mock_request_endpoint.call_count, 1)
        # Assert
        self.assertEqual(result, '12341234')
        self.assertEqual(mock_api_key.call_count, 1)
        self.assertEqual(mock_request_endpoint.call_count, 1)
        self.assertEqual(mock_request_endpoint.call_args, mock.call(url))

    def test__init__default_var(self):
        """Constructor defaults: empty per-instance cache, Steam API base URL."""
        # Act
        service = SteamAPIService()
        # Assert
        self.assertEqual(service.cache_steam_info, {})
        self.assertEqual(service.base_url, 'http://api.steampowered.com')

    @mock.patch.object(SteamAPIService, '_request_endpoint')
    @mock.patch.object(SteamAPIService, 'api_key', new_callable=mock.PropertyMock)
    def test_cs_info_returns_none_cause_playerstats_not_found(self, mock_api_key, mock_request_endpoint):
        """get_cs_info returns None when the endpoint yields no payload."""
        # Arrange
        mock_request_endpoint.return_value = None
        steam_id = 1234
        mock_api_key.return_value = 'hummus'
        url = "http://api.steampowered.com/ISteamUserStats/GetUserStatsForGame/v0002/"
        url_params = "?appid=730&key=hummus&steamid=1234"
        super_url = "{}{}".format(url, url_params)
        service = SteamAPIService()
        # Act
        result = service.get_cs_info(steam_id)
        # Assert
        self.assertIsNone(result)
        self.assertEqual(mock_request_endpoint.call_count, 1)
        self.assertEqual(mock_request_endpoint.call_args, mock.call(super_url))

    @mock.patch.object(SteamAPIService, '_request_endpoint')
    @mock.patch.object(SteamAPIService, 'api_key', new_callable=mock.PropertyMock)
    def test_cs_info_returns_playerstats(self, mock_api_key, mock_request_endpoint):
        """get_cs_info unwraps playerstats.stats from the response payload."""
        # Arrange
        mock_request_endpoint.return_value = {'playerstats': {'stats': 'statA'}}
        steam_id = 1234
        mock_api_key.return_value = 'hummus'
        url = "http://api.steampowered.com/ISteamUserStats/GetUserStatsForGame/v0002/"
        url_params = "?appid=730&key=hummus&steamid=1234"
        super_url = "{}{}".format(url, url_params)
        service = SteamAPIService()
        # Act
        result = service.get_cs_info(steam_id)
        # Assert
        self.assertEqual(result, 'statA')
        self.assertEqual(mock_request_endpoint.call_count, 1)
        self.assertEqual(mock_request_endpoint.call_args, mock.call(super_url))

    @mock.patch.object(SteamAPIService, '_request_endpoint')
    @mock.patch.object(SteamAPIService, 'api_key', new_callable=mock.PropertyMock)
    def test_cs_info_urls_properly_formed(self, mock_api_key, mock_request_endpoint):
        """get_cs_info hits GetUserStatsForGame with appid, key and steamid."""
        # Arrange
        mock_api_key.return_value = 'hummus'
        steam_id = 1234
        url = "http://api.steampowered.com/ISteamUserStats/GetUserStatsForGame/v0002/"
        url_params = "?appid=730&key=hummus&steamid=1234"
        super_url = "{}{}".format(url, url_params)
        service = SteamAPIService()
        # Act
        service.get_cs_info(steam_id)
        # Assert
        mock_request_endpoint.assert_called_with(super_url)
        self.assertEqual(mock_request_endpoint.call_count, 1)
        self.assertEqual(mock_request_endpoint.call_args, mock.call(super_url))

    @mock.patch.object(SteamAPIService, '_request_endpoint')
    @mock.patch.object(SteamAPIService, 'api_key', new_callable=mock.PropertyMock)
    def test_get_steam_info_urls_properly_formed(self, mock_api_key, mock_request_endpoint):
        """get_steam_info hits GetPlayerSummaries with key and steamids."""
        # Arrange
        mock_api_key.return_value = 'hummus'
        steam_id = 1234
        url = "http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/"
        url_params = "?key=hummus&steamids=1234"
        super_url = "{}{}".format(url, url_params)
        service = SteamAPIService()
        # Act
        service.get_steam_info(steam_id)
        # Assert
        mock_request_endpoint.assert_called_with(super_url)
        self.assertEqual(mock_request_endpoint.call_count, 1)
        self.assertEqual(mock_request_endpoint.call_args, mock.call(super_url))

    @mock.patch.object(SteamAPIService, '_request_endpoint')
    @mock.patch.object(SteamAPIService, 'api_key', new_callable=mock.PropertyMock)
    def test_get_steam_info_set_steam_id_cache_as_none_cause_cs_profile_not_found(
            self, mock_api_key, mock_request_endpoint):
        """A failed lookup is cached as None under the steam_id key."""
        # Arrange
        mock_request_endpoint.return_value = None
        mock_api_key.return_value = 'hummus'
        steam_id = 1234
        url = "http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/"
        url_params = "?key=hummus&steamids=1234"
        super_url = "{}{}".format(url, url_params)
        service = SteamAPIService()
        # Act
        service.get_steam_info(steam_id)
        # Assert
        self.assertIsNone(service.cache_steam_info[steam_id])
        self.assertEqual(mock_request_endpoint.call_count, 1)
        self.assertEqual(mock_request_endpoint.call_args, mock.call(super_url))

    @mock.patch.object(SteamAPIService, '_request_endpoint')
    @mock.patch.object(SteamAPIService, 'api_key', new_callable=mock.PropertyMock)
    def test_get_steam_info_when_called_twice_returns_cache_value(self, mock_api_key, mock_request_endpoint):
        """A second get_steam_info for the same id is served from the cache (one request only)."""
        # Arrange
        mock_api_key.return_value = 'hummus'
        steam_id = 1234
        url = "http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/"
        url_params = "?key=hummus&steamids=1234"
        super_url = "{}{}".format(url, url_params)
        service = SteamAPIService()
        # Act
        service.get_steam_info(steam_id)
        service.get_steam_info(steam_id)
        # Assert
        self.assertEqual(mock_request_endpoint.call_count, 1)
        mock_request_endpoint.assert_called_with(super_url)

    @mock.patch.object(urllib2, 'urlopen')
    def test_request_service_returns_none_if_fails_to_request(self, urllib_open):
        """An HTTPError from urlopen yields None instead of propagating."""
        # Arrange
        urllib_open.side_effect = urllib2.HTTPError('', 500, '', None, None)
        url = 'urltonothing'
        service = SteamAPIService()
        # Act
        result = service._request_endpoint(url)  # pylint: disable=protected-access
        # Assert
        self.assertIsNone(result)

    @mock.patch.object(urllib2, 'urlopen')
    def test_request_service_returns_json_object_deserialized_from_response(self, urllib_open):
        """The raw response body is JSON-decoded into Python objects."""
        # Arrange
        urllib_open.return_value.read.return_value = '{}'
        url = 'urltonothing'
        service = SteamAPIService()
        # Act
        result = service._request_endpoint(url)  # pylint: disable=protected-access
        # Assert
        self.assertEqual(result, {})
| 40.541284
| 109
| 0.694954
| 1,037
| 8,838
| 5.571842
| 0.112825
| 0.109034
| 0.105227
| 0.083074
| 0.855832
| 0.842506
| 0.809104
| 0.808757
| 0.779855
| 0.779855
| 0
| 0.017584
| 0.202082
| 8,838
| 217
| 110
| 40.728111
| 0.801758
| 0.033718
| 0
| 0.755245
| 0
| 0
| 0.143849
| 0.025053
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.083916
| false
| 0
| 0.027972
| 0
| 0.118881
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1751c5c8035754d2210f1514509d635bac839c50
| 104
|
py
|
Python
|
ticket/models/__init__.py
|
ifbhack/ticketsystem
|
f2732fe5665dd5e05ecc957446cd883c9c8d63e2
|
[
"MIT"
] | null | null | null |
ticket/models/__init__.py
|
ifbhack/ticketsystem
|
f2732fe5665dd5e05ecc957446cd883c9c8d63e2
|
[
"MIT"
] | null | null | null |
ticket/models/__init__.py
|
ifbhack/ticketsystem
|
f2732fe5665dd5e05ecc957446cd883c9c8d63e2
|
[
"MIT"
] | null | null | null |
from ticket.models.ticket import *
from ticket.models.user import *
from ticket.models.message import *
| 26
| 35
| 0.798077
| 15
| 104
| 5.533333
| 0.4
| 0.361446
| 0.578313
| 0.53012
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 104
| 3
| 36
| 34.666667
| 0.902174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
177279c8eeff06e4d2e45be6350515ec25eb791b
| 7,438
|
py
|
Python
|
src/zad3/test_zad3.py
|
TestowanieAutomatyczneUG/laboratorium-5-melkorw
|
75b8f2a9b29940c9989304f45166571b62829c60
|
[
"MIT"
] | null | null | null |
src/zad3/test_zad3.py
|
TestowanieAutomatyczneUG/laboratorium-5-melkorw
|
75b8f2a9b29940c9989304f45166571b62829c60
|
[
"MIT"
] | null | null | null |
src/zad3/test_zad3.py
|
TestowanieAutomatyczneUG/laboratorium-5-melkorw
|
75b8f2a9b29940c9989304f45166571b62829c60
|
[
"MIT"
] | null | null | null |
import unittest
from zad3.zad3 import Song
class SongTest(unittest.TestCase):
    """Tests for Song: 'Twelve Days of Christmas' line generation.

    Covers single-line output, line ranges, the full song, and input
    validation (out-of-range, negative, inverted range, wrong types).
    """

    def setUp(self):
        self.temp = Song()
        # Python 2 compatibility: alias assertRaisesRegexp when the
        # Python 3 name assertRaisesRegex is missing on this TestCase.
        try:
            self.assertRaisesRegex
        except AttributeError:
            self.assertRaisesRegex = self.assertRaisesRegexp

    def assertRaisesWithMessage(self, exception):
        # Require that the raised exception carries a non-empty message.
        return self.assertRaisesRegex(exception, r".+")

    def test_single_line_print(self):
        """singleLine(1) returns the first verse."""
        self.assertEqual(self.temp.singleLine(1), 'On the first day of Christmas my true love gave to me: a Partridge in a Pear Tree.')

    def test_multiple_lines_2_and_5_print(self):
        """between(2,5) returns verses 2 through 5 inclusive."""
        self.assertEqual(self.temp.between(2,5), ['On the second day of Christmas my true love gave to me: two Turtle Doves, and a Partridge in a Pear Tree.', 'On the third day of Christmas my true love gave to me: three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the fourth day of Christmas my true love gave to me: four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the fifth day of Christmas my true love gave to me: five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.'])

    def test_multiple_lines_same_arguments_print(self):
        """between(n,n) is an empty range."""
        self.assertEqual(self.temp.between(2,2), [])

    def test_multiple_lines_whole_text_print(self):
        """between(1,12) returns every verse of the song."""
        self.assertEqual(self.temp.between(1,12), ['On the first day of Christmas my true love gave to me: a Partridge in a Pear Tree.', 'On the second day of Christmas my true love gave to me: two Turtle Doves, and a Partridge in a Pear Tree.', 'On the third day of Christmas my true love gave to me: three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the fourth day of Christmas my true love gave to me: four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the fifth day of Christmas my true love gave to me: five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the sixth day of Christmas my true love gave to me: six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the seventh day of Christmas my true love gave to me: seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the eighth day of Christmas my true love gave to me: eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the ninth day of Christmas my true love gave to me: nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the tenth day of Christmas my true love gave to me: ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the eleventh day of Christmas my true love gave to me: eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the twelfth day of Christmas my true love gave to me: twelve Drummers Drumming, eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.'])

    def test_whole_text_print(self):
        """whole() returns the complete song, identical to between(1,12)."""
        self.assertEqual(self.temp.whole(), ['On the first day of Christmas my true love gave to me: a Partridge in a Pear Tree.', 'On the second day of Christmas my true love gave to me: two Turtle Doves, and a Partridge in a Pear Tree.', 'On the third day of Christmas my true love gave to me: three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the fourth day of Christmas my true love gave to me: four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the fifth day of Christmas my true love gave to me: five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the sixth day of Christmas my true love gave to me: six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the seventh day of Christmas my true love gave to me: seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the eighth day of Christmas my true love gave to me: eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the ninth day of Christmas my true love gave to me: nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the tenth day of Christmas my true love gave to me: ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the eleventh day of Christmas my true love gave to me: eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.', 'On the twelfth day of Christmas my true love gave to me: twelve Drummers Drumming, eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.'])

    def test_disallow_number_of_line_bigger_than_text(self):
        """Line numbers past verse 12 raise ValueError."""
        with self.assertRaisesWithMessage(ValueError):
            self.temp.singleLine(20)

    def test_disallow_negative_number_of_line(self):
        """Negative line numbers raise ValueError."""
        with self.assertRaisesWithMessage(ValueError):
            self.temp.singleLine(-5)

    def test_disallow_negative_number_of_first_value(self):
        """A negative range start raises ValueError."""
        with self.assertRaisesWithMessage(ValueError):
            self.temp.between(-1,10)

    def test_disallow_bigger_number_than_text_of_second_value(self):
        """A range end past verse 12 raises ValueError."""
        with self.assertRaisesWithMessage(ValueError):
            self.temp.between(2,20)

    def test_disallow_second_value_bigger_than_first(self):
        """An inverted range (start > end) raises ValueError."""
        with self.assertRaisesWithMessage(ValueError):
            self.temp.between(12,1)

    def test_disallow_type_other_than_int(self):
        """Non-int arguments (bool here) raise TypeError."""
        with self.assertRaisesWithMessage(TypeError):
            self.temp.singleLine(False)

    def test_disallow_type_other_than_int_in_between_method(self):
        """Non-int range bounds raise TypeError."""
        with self.assertRaisesWithMessage(TypeError):
            self.temp.between(True, "")

    def tearDown(self):
        self.temp = None
| 126.067797
| 2,457
| 0.745362
| 1,251
| 7,438
| 4.378098
| 0.094325
| 0.026474
| 0.074128
| 0.084718
| 0.902319
| 0.902319
| 0.885156
| 0.853569
| 0.790031
| 0.766295
| 0
| 0.003936
| 0.18029
| 7,438
| 59
| 2,458
| 126.067797
| 0.894374
| 0
| 0
| 0.159091
| 0
| 0.590909
| 0.716763
| 0
| 0
| 0
| 0
| 0
| 0.363636
| 1
| 0.340909
| false
| 0
| 0.045455
| 0.022727
| 0.431818
| 0.113636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
bdade130651c12dc7580e60dd669d6ba4e11d20b
| 112
|
py
|
Python
|
simplecompiler/generate/__init__.py
|
cgsdfc/Optimize
|
f3a28ccd5caf01d2a306859d641a1cb5aa0d36f3
|
[
"MIT"
] | 1
|
2019-07-13T16:40:47.000Z
|
2019-07-13T16:40:47.000Z
|
simplecompiler/generate/__init__.py
|
cgsdfc/simplecc
|
f3a28ccd5caf01d2a306859d641a1cb5aa0d36f3
|
[
"MIT"
] | null | null | null |
simplecompiler/generate/__init__.py
|
cgsdfc/simplecc
|
f3a28ccd5caf01d2a306859d641a1cb5aa0d36f3
|
[
"MIT"
] | null | null | null |
from simplecompiler.generate.grammar.pgen2 import generate_grammar
from simplecompiler.generate.ast import asdl
| 37.333333
| 66
| 0.883929
| 14
| 112
| 7
| 0.571429
| 0.367347
| 0.530612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009615
| 0.071429
| 112
| 2
| 67
| 56
| 0.932692
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
bded3da14e86d393904e88038c3f8451f2bbec7f
| 38,644
|
py
|
Python
|
swagger_client/api/contracts_api.py
|
rseichter/bootini-star
|
a80258f01a05e4df38748b8cb47dfadabd42c20d
|
[
"MIT"
] | null | null | null |
swagger_client/api/contracts_api.py
|
rseichter/bootini-star
|
a80258f01a05e4df38748b8cb47dfadabd42c20d
|
[
"MIT"
] | null | null | null |
swagger_client/api/contracts_api.py
|
rseichter/bootini-star
|
a80258f01a05e4df38748b8cb47dfadabd42c20d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class ContractsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    # Dependency-injected HTTP client; falls back to a default ApiClient
    # when the caller does not supply one (swagger-codegen convention).
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client
def get_characters_character_id_contracts(self, character_id, **kwargs): # noqa: E501
"""Get contracts # noqa: E501
Returns contracts available to a character, only if the character is issuer, acceptor or assignee. Only returns contracts no older than 30 days, or if the status is \"in_progress\". --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_contracts(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdContracts200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_contracts_with_http_info(character_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_contracts_with_http_info(character_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_contracts_with_http_info(self, character_id, **kwargs): # noqa: E501
"""Get contracts # noqa: E501
Returns contracts available to a character, only if the character is issuer, acceptor or assignee. Only returns contracts no older than 30 days, or if the status is \"in_progress\". --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_contracts_with_http_info(character_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdContracts200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_contracts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_contracts`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_contracts`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/contracts/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdContracts200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_contracts_contract_id_bids(self, character_id, contract_id, **kwargs): # noqa: E501
"""Get contract bids # noqa: E501
Lists bids on a particular auction contract --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_contracts_contract_id_bids(character_id, contract_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param int contract_id: ID of a contract (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdContractsContractIdBids200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_contracts_contract_id_bids_with_http_info(character_id, contract_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_contracts_contract_id_bids_with_http_info(character_id, contract_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_contracts_contract_id_bids_with_http_info(self, character_id, contract_id, **kwargs): # noqa: E501
"""Get contract bids # noqa: E501
Lists bids on a particular auction contract --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_contracts_contract_id_bids_with_http_info(character_id, contract_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param int contract_id: ID of a contract (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdContractsContractIdBids200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'contract_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_contracts_contract_id_bids" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_contracts_contract_id_bids`") # noqa: E501
# verify the required parameter 'contract_id' is set
if ('contract_id' not in params or
params['contract_id'] is None):
raise ValueError("Missing the required parameter `contract_id` when calling `get_characters_character_id_contracts_contract_id_bids`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_contracts_contract_id_bids`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
if 'contract_id' in params:
path_params['contract_id'] = params['contract_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/contracts/{contract_id}/bids/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdContractsContractIdBids200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_contracts_contract_id_items(self, character_id, contract_id, **kwargs): # noqa: E501
"""Get contract items # noqa: E501
Lists items of a particular contract --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_contracts_contract_id_items(character_id, contract_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param int contract_id: ID of a contract (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdContractsContractIdItems200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_characters_character_id_contracts_contract_id_items_with_http_info(character_id, contract_id, **kwargs) # noqa: E501
else:
(data) = self.get_characters_character_id_contracts_contract_id_items_with_http_info(character_id, contract_id, **kwargs) # noqa: E501
return data
def get_characters_character_id_contracts_contract_id_items_with_http_info(self, character_id, contract_id, **kwargs): # noqa: E501
"""Get contract items # noqa: E501
Lists items of a particular contract --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_characters_character_id_contracts_contract_id_items_with_http_info(character_id, contract_id, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param int contract_id: ID of a contract (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdContractsContractIdItems200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'contract_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_contracts_contract_id_items" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_contracts_contract_id_items`") # noqa: E501
# verify the required parameter 'contract_id' is set
if ('contract_id' not in params or
params['contract_id'] is None):
raise ValueError("Missing the required parameter `contract_id` when calling `get_characters_character_id_contracts_contract_id_items`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_contracts_contract_id_items`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
if 'contract_id' in params:
path_params['contract_id'] = params['contract_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/contracts/{contract_id}/items/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdContractsContractIdItems200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_contracts(self, corporation_id, **kwargs): # noqa: E501
"""Get coporation contracts # noqa: E501
Returns contracts available to a coporation, only if the corporation is issuer, acceptor or assignee. Only returns contracts no older than 30 days, or if the status is \"in_progress\". --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_contracts(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdContracts200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_contracts_with_http_info(corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_contracts_with_http_info(corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_contracts_with_http_info(self, corporation_id, **kwargs): # noqa: E501
"""Get coporation contracts # noqa: E501
Returns contracts available to a coporation, only if the corporation is issuer, acceptor or assignee. Only returns contracts no older than 30 days, or if the status is \"in_progress\". --- This route is cached for up to 300 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_contracts_with_http_info(corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdContracts200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_contracts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_contracts`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_contracts`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/contracts/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdContracts200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_contracts_contract_id_bids(self, contract_id, corporation_id, **kwargs): # noqa: E501
"""Get corporation contract bids # noqa: E501
Lists bids on a particular auction contract --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_contracts_contract_id_bids(contract_id, corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int contract_id: ID of a contract (required)
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdContractsContractIdBids200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_contracts_contract_id_bids_with_http_info(contract_id, corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_contracts_contract_id_bids_with_http_info(contract_id, corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_contracts_contract_id_bids_with_http_info(self, contract_id, corporation_id, **kwargs): # noqa: E501
"""Get corporation contract bids # noqa: E501
Lists bids on a particular auction contract --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_contracts_contract_id_bids_with_http_info(contract_id, corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int contract_id: ID of a contract (required)
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param int page: Which page of results to return
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdContractsContractIdBids200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['contract_id', 'corporation_id', 'datasource', 'page', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_contracts_contract_id_bids" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'contract_id' is set
if ('contract_id' not in params or
params['contract_id'] is None):
raise ValueError("Missing the required parameter `contract_id` when calling `get_corporations_corporation_id_contracts_contract_id_bids`") # noqa: E501
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_contracts_contract_id_bids`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_contracts_contract_id_bids`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'contract_id' in params:
path_params['contract_id'] = params['contract_id'] # noqa: E501
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/contracts/{contract_id}/bids/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdContractsContractIdBids200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_corporations_corporation_id_contracts_contract_id_items(self, contract_id, corporation_id, **kwargs): # noqa: E501
"""Get corporation contract items # noqa: E501
Lists items of a particular contract --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_contracts_contract_id_items(contract_id, corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int contract_id: ID of a contract (required)
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdContractsContractIdItems200Ok]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_corporations_corporation_id_contracts_contract_id_items_with_http_info(contract_id, corporation_id, **kwargs) # noqa: E501
else:
(data) = self.get_corporations_corporation_id_contracts_contract_id_items_with_http_info(contract_id, corporation_id, **kwargs) # noqa: E501
return data
def get_corporations_corporation_id_contracts_contract_id_items_with_http_info(self, contract_id, corporation_id, **kwargs): # noqa: E501
"""Get corporation contract items # noqa: E501
Lists items of a particular contract --- This route is cached for up to 3600 seconds # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_corporations_corporation_id_contracts_contract_id_items_with_http_info(contract_id, corporation_id, async=True)
>>> result = thread.get()
:param async bool
:param int contract_id: ID of a contract (required)
:param int corporation_id: An EVE corporation ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCorporationsCorporationIdContractsContractIdItems200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['contract_id', 'corporation_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_corporations_corporation_id_contracts_contract_id_items" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'contract_id' is set
if ('contract_id' not in params or
params['contract_id'] is None):
raise ValueError("Missing the required parameter `contract_id` when calling `get_corporations_corporation_id_contracts_contract_id_items`") # noqa: E501
# verify the required parameter 'corporation_id' is set
if ('corporation_id' not in params or
params['corporation_id'] is None):
raise ValueError("Missing the required parameter `corporation_id` when calling `get_corporations_corporation_id_contracts_contract_id_items`") # noqa: E501
if 'corporation_id' in params and params['corporation_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `corporation_id` when calling `get_corporations_corporation_id_contracts_contract_id_items`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'contract_id' in params:
path_params['contract_id'] = params['contract_id'] # noqa: E501
if 'corporation_id' in params:
path_params['corporation_id'] = params['corporation_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/corporations/{corporation_id}/contracts/{contract_id}/items/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCorporationsCorporationIdContractsContractIdItems200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 51.048877
| 255
| 0.659766
| 4,673
| 38,644
| 5.212711
| 0.044725
| 0.043352
| 0.03432
| 0.037933
| 0.970606
| 0.970606
| 0.969128
| 0.966008
| 0.965023
| 0.964284
| 0
| 0.018378
| 0.257944
| 38,644
| 756
| 256
| 51.116402
| 0.831078
| 0.052557
| 0
| 0.810811
| 1
| 0.014742
| 0.281423
| 0.099946
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.009828
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
da443238c5cb4a4539dfda9129cec4d70131c44d
| 4,058
|
py
|
Python
|
data.py
|
Drakula44/skolsko_zvono
|
786f49038b9251dfccc553446845fe4a6fb49fda
|
[
"MIT"
] | 1
|
2021-05-04T13:30:04.000Z
|
2021-05-04T13:30:04.000Z
|
data.py
|
Drakula44/skolsko_zvono
|
786f49038b9251dfccc553446845fe4a6fb49fda
|
[
"MIT"
] | null | null | null |
data.py
|
Drakula44/skolsko_zvono
|
786f49038b9251dfccc553446845fe4a6fb49fda
|
[
"MIT"
] | null | null | null |
from flask_assets import Bundle
# Global flag consumed elsewhere in the app — presumably toggles the bell
# logic on/off; TODO(review): confirm against the caller.
working = True

# flask-assets bundles: one JS bundle for the schedule ("raspored") page and
# two cssmin-minified CSS bundles (schedule page and site-wide layout).
bundles = {
    'raspored_js': Bundle(
        'scripts/addNew.js',
        'scripts/jquery-3.6.0.min.js',
        output='gen/home.js',
    ),
    'raspored_css': Bundle(
        'styles/raspored.css',
        filters='cssmin',
        output='gen/home.css',
    ),
    'main_css': Bundle(
        'styles/main.css',
        filters='cssmin',
        output='gen/main.css',
    ),
}
# Bell timetables as {day: [[start, end], ...]} with "HH:MM" strings.
# Monday-Friday share one timetable and Saturday/Sunday share another, so the
# week dicts are built from three slot lists instead of seven copy-pasted rows
# (the original repeated the weekday row five times per schedule).

_WEEKDAYS = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday')
_WEEKEND_DAYS = ('saturday', 'sunday')

# Shortened (30-minute) lessons, Monday-Friday.
_SHORT_WEEKDAY_SLOTS = [
    ['07:55', '08:25'], ['08:30', '09:00'], ['09:05', '09:35'],
    ['09:50', '10:20'], ['10:25', '10:55'], ['11:10', '11:40'],
    ['11:45', '12:15'], ['12:30', '13:00'], ['13:05', '13:35'],
    ['13:50', '14:20'], ['14:25', '14:55'], ['15:10', '15:40'],
    ['15:45', '16:15'],
]

# Regular (45-minute) lessons, Monday-Friday.
_LONG_WEEKDAY_SLOTS = [
    ['07:40', '08:25'], ['08:30', '09:15'], ['09:20', '10:05'],
    ['10:25', '11:10'], ['11:15', '12:00'], ['12:15', '13:00'],
    ['13:05', '13:50'], ['14:00', '14:45'], ['14:50', '15:35'],
    ['15:55', '16:40'], ['16:45', '17:30'], ['17:45', '18:30'],
    ['18:35', '19:20'],
]

# Weekend lessons are the same under both schedules.
_WEEKEND_SLOTS = [
    ['09:00', '09:45'], ['09:50', '10:35'], ['11:00', '11:45'],
    ['11:50', '12:35'], ['13:00', '13:45'], ['13:50', '14:35'],
]


def _build_week(weekday_slots):
    """Return a full-week schedule dict.

    Each day gets its own fresh copy of the slot lists so that mutating one
    day's entries (as the original independent literals allowed) cannot leak
    into another day.
    """
    week = {}
    for day in _WEEKDAYS:
        week[day] = [slot[:] for slot in weekday_slots]
    for day in _WEEKEND_DAYS:
        week[day] = [slot[:] for slot in _WEEKEND_SLOTS]
    return week


short_schedule = _build_week(_SHORT_WEEKDAY_SLOTS)
long_schedule = _build_week(_LONG_WEEKDAY_SLOTS)

# English day key -> Serbian display name.
days_in_week = {
    "monday": 'Ponedeljak',
    "tuesday": 'Utorak',
    "wednesday": 'Sreda',
    "thursday": 'Četvrtak',
    "friday": 'Petak',
    "saturday": 'Subota',
    "sunday": 'Nedelja',
}

# The schedule currently in effect.
weekly_schedule = long_schedule
| 72.464286
| 281
| 0.426318
| 704
| 4,058
| 2.443182
| 0.098011
| 0.032558
| 0.048837
| 0.046512
| 0.732558
| 0.732558
| 0.732558
| 0.732558
| 0.732558
| 0.732558
| 0
| 0.357246
| 0.148103
| 4,058
| 55
| 282
| 73.781818
| 0.140295
| 0
| 0
| 0.088889
| 0
| 0
| 0.466601
| 0.006655
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022222
| 0
| 0.022222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
da560013e566e7e96e7fc0571fa4ce6aa1d9d687
| 42,151
|
py
|
Python
|
swagger_client/api/notification_api.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/api/notification_api.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/api/notification_api.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class NotificationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Bind this API to *api_client*, constructing a default client if none given."""
    self.api_client = api_client if api_client is not None else ApiClient()
def delete_notification(self, target_id, notification_id, **kwargs):  # noqa: E501
    """Delete the notification identified by **notificationId**.  # noqa: E501

    Synchronous by default; pass async=True for an asynchronous request,
    in which case a request thread is returned:

    >>> thread = api.delete_notification(target_id, notification_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to delete the notification (required)
    :param int notification_id: The **notificationId** to delete (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info* variant already yields either the bare data
    # (sync) or the request thread (async=True), so a single delegation
    # covers both branches.
    return self.delete_notification_with_http_info(target_id, notification_id, **kwargs)  # noqa: E501
def delete_notification_with_http_info(self, target_id, notification_id, **kwargs):  # noqa: E501
    """Delete notification  # noqa: E501

    Delete notification identified by **notificationId**.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.delete_notification_with_http_info(target_id, notification_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to delete the notification (required)
    :param int notification_id: The **notificationId** to delete (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the transport-control kwargs accepted by every method.
    all_params = ['target_id', 'notification_id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_notification" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'target_id' is set
    if ('target_id' not in params or
            params['target_id'] is None):
        raise ValueError("Missing the required parameter `target_id` when calling `delete_notification`")  # noqa: E501
    # verify the required parameter 'notification_id' is set
    if ('notification_id' not in params or
            params['notification_id'] is None):
        raise ValueError("Missing the required parameter `notification_id` when calling `delete_notification`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'target_id' in params:
        path_params['targetId'] = params['target_id']  # noqa: E501
    if 'notification_id' in params:
        path_params['notificationId'] = params['notification_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/notificationtargets/{targetId}/notifications/{notificationId}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUGFIX: `async` is a reserved word since Python 3.7, so it may no
        # longer appear as an explicit keyword argument (SyntaxError at parse
        # time). Forwarding it through ** unpacking is behaviorally identical.
        **{'async': params.get('async')})
def get_notification(self, target_id, notification_id, **kwargs):  # noqa: E501
    """Retrieve the notification identified by **notificationId**.  # noqa: E501

    Synchronous by default; pass async=True for an asynchronous request,
    in which case a request thread is returned:

    >>> thread = api.get_notification(target_id, notification_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to retrieve the notification (required)
    :param int notification_id: The *notificationId* to retrieve (required)
    :return: Notification
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info* variant already yields either the bare data
    # (sync) or the request thread (async=True), so a single delegation
    # covers both branches.
    return self.get_notification_with_http_info(target_id, notification_id, **kwargs)  # noqa: E501
def get_notification_with_http_info(self, target_id, notification_id, **kwargs):  # noqa: E501
    """Get notification  # noqa: E501

    Retrieve notification identified by **notificationId**.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_notification_with_http_info(target_id, notification_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to retrieve the notification (required)
    :param int notification_id: The *notificationId* to retrieve (required)
    :return: Notification
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the transport-control kwargs accepted by every method.
    all_params = ['target_id', 'notification_id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_notification" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'target_id' is set
    if ('target_id' not in params or
            params['target_id'] is None):
        raise ValueError("Missing the required parameter `target_id` when calling `get_notification`")  # noqa: E501
    # verify the required parameter 'notification_id' is set
    if ('notification_id' not in params or
            params['notification_id'] is None):
        raise ValueError("Missing the required parameter `notification_id` when calling `get_notification`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'target_id' in params:
        path_params['targetId'] = params['target_id']  # noqa: E501
    if 'notification_id' in params:
        path_params['notificationId'] = params['notification_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/notificationtargets/{targetId}/notifications/{notificationId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Notification',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUGFIX: `async` is a reserved word since Python 3.7, so it may no
        # longer appear as an explicit keyword argument (SyntaxError at parse
        # time). Forwarding it through ** unpacking is behaviorally identical.
        **{'async': params.get('async')})
def get_notifications(self, target_id, **kwargs):  # noqa: E501
    """Retrieve all notifications associated with **targetId**.  # noqa: E501

    Synchronous by default; pass async=True for an asynchronous request,
    in which case a request thread is returned:

    >>> thread = api.get_notifications(target_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to retrieve notifications (required)
    :return: list[Notification]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info* variant already yields either the bare data
    # (sync) or the request thread (async=True), so a single delegation
    # covers both branches.
    return self.get_notifications_with_http_info(target_id, **kwargs)  # noqa: E501
def get_notifications_with_http_info(self, target_id, **kwargs):  # noqa: E501
    """List notifications  # noqa: E501

    Retrieve all notifications associated with **targetId**.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_notifications_with_http_info(target_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to retrieve notifications (required)
    :return: list[Notification]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the transport-control kwargs accepted by every method.
    all_params = ['target_id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_notifications" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'target_id' is set
    if ('target_id' not in params or
            params['target_id'] is None):
        raise ValueError("Missing the required parameter `target_id` when calling `get_notifications`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'target_id' in params:
        path_params['targetId'] = params['target_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/notificationtargets/{targetId}/notifications', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Notification]',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUGFIX: `async` is a reserved word since Python 3.7, so it may no
        # longer appear as an explicit keyword argument (SyntaxError at parse
        # time). Forwarding it through ** unpacking is behaviorally identical.
        **{'async': params.get('async')})
def post_balance_change_notification(self, target_id, notification_dto, **kwargs):  # noqa: E501
    """Create a balance change notification for **targetId**.  # noqa: E501

    Only one notification per target and account can be created; the server
    fires it when the account balance crosses the configured threshold (only
    one of **lowerThreshold** / **upperThreshold** may be set).
    Synchronous by default; pass async=True for an asynchronous request,
    in which case a request thread is returned:

    >>> thread = api.post_balance_change_notification(target_id, notification_dto, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to create the notification (required)
    :param BalanceChangeNotification notification_dto: The balance change notification to create (required)
    :return: BalanceChangeNotification
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info* variant already yields either the bare data
    # (sync) or the request thread (async=True), so a single delegation
    # covers both branches.
    return self.post_balance_change_notification_with_http_info(target_id, notification_dto, **kwargs)  # noqa: E501
def post_balance_change_notification_with_http_info(self, target_id, notification_dto, **kwargs):  # noqa: E501
    """Create a balance change notification  # noqa: E501

    Only one notification per target and account can be created. Receive notifications when the balance crosses the configured threshold; only one of **lowerThreshold** and **upperThreshold** may be set. This example sends a notification when the balance is less than 0 €   ```json {   \"type\": \"BalanceChangeNotification\",   \"accountId\": 0,   \"upperThreshold\": {     \"value\": 0,     \"currency\": \"EUR\"   } } ```  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.post_balance_change_notification_with_http_info(target_id, notification_dto, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to create the notification (required)
    :param BalanceChangeNotification notification_dto: The balance change notification to create (required)
    :return: BalanceChangeNotification
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the transport-control kwargs accepted by every method.
    all_params = ['target_id', 'notification_dto']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_balance_change_notification" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'target_id' is set
    if ('target_id' not in params or
            params['target_id'] is None):
        raise ValueError("Missing the required parameter `target_id` when calling `post_balance_change_notification`")  # noqa: E501
    # verify the required parameter 'notification_dto' is set
    if ('notification_dto' not in params or
            params['notification_dto'] is None):
        raise ValueError("Missing the required parameter `notification_dto` when calling `post_balance_change_notification`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'target_id' in params:
        path_params['targetId'] = params['target_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # The DTO travels as the JSON request body.
    if 'notification_dto' in params:
        body_params = params['notification_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/notificationtargets/{targetId}/notifications/balancechangenotification', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='BalanceChangeNotification',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUGFIX: `async` is a reserved word since Python 3.7, so it may no
        # longer appear as an explicit keyword argument (SyntaxError at parse
        # time). Forwarding it through ** unpacking is behaviorally identical.
        **{'async': params.get('async')})
def post_budget_notification(self, target_id, notification_dto, **kwargs):  # noqa: E501
    """Create a budget change notification for **targetId**.  # noqa: E501

    The server fires it when the calculated budget for the current month
    crosses the configured threshold (only one of **lowerThreshold** /
    **upperThreshold** may be set).
    Synchronous by default; pass async=True for an asynchronous request,
    in which case a request thread is returned:

    >>> thread = api.post_budget_notification(target_id, notification_dto, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to create the notification (required)
    :param BudgetNotification notification_dto: The budget change notification to create (required)
    :return: BudgetNotification
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info* variant already yields either the bare data
    # (sync) or the request thread (async=True), so a single delegation
    # covers both branches.
    return self.post_budget_notification_with_http_info(target_id, notification_dto, **kwargs)  # noqa: E501
def post_budget_notification_with_http_info(self, target_id, notification_dto, **kwargs):  # noqa: E501
    """Create a budget change notification  # noqa: E501

    Receive notifications when the calculated budget for the current month crosses the configured threshold; only one of **lowerThreshold** and **upperThreshold** may be set. This example send a notification when the budget crosses 100 €   ```json {   \"type\": \"BudgetNotification\",   \"accountId\": 0,   \"lowerThreshold\": {     \"value\": 10000,     \"currency\": \"EUR\"   } } ```  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.post_budget_notification_with_http_info(target_id, notification_dto, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to create the notification (required)
    :param BudgetNotification notification_dto: The budget change notification to create (required)
    :return: BudgetNotification
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the transport-control kwargs accepted by every method.
    all_params = ['target_id', 'notification_dto']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_budget_notification" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'target_id' is set
    if ('target_id' not in params or
            params['target_id'] is None):
        raise ValueError("Missing the required parameter `target_id` when calling `post_budget_notification`")  # noqa: E501
    # verify the required parameter 'notification_dto' is set
    if ('notification_dto' not in params or
            params['notification_dto'] is None):
        raise ValueError("Missing the required parameter `notification_dto` when calling `post_budget_notification`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'target_id' in params:
        path_params['targetId'] = params['target_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # The DTO travels as the JSON request body.
    if 'notification_dto' in params:
        body_params = params['notification_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/notificationtargets/{targetId}/notifications/budgetnotification', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='BudgetNotification',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUGFIX: `async` is a reserved word since Python 3.7, so it may no
        # longer appear as an explicit keyword argument (SyntaxError at parse
        # time). Forwarding it through ** unpacking is behaviorally identical.
        **{'async': params.get('async')})
def post_daily_summary_notification(self, target_id, notification_dto, **kwargs):  # noqa: E501
    """Create a daily summary notification for **targetId**.  # noqa: E501

    The server sends an account-status notification on the configured days
    of the week at the configured time (pass your timezone or adjust to UTC).
    Synchronous by default; pass async=True for an asynchronous request,
    in which case a request thread is returned:

    >>> thread = api.post_daily_summary_notification(target_id, notification_dto, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to create the notification (required)
    :param DailySummaryNotification notification_dto: The daily summary notification to create (required)
    :return: DailySummaryNotification
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info* variant already yields either the bare data
    # (sync) or the request thread (async=True), so a single delegation
    # covers both branches.
    return self.post_daily_summary_notification_with_http_info(target_id, notification_dto, **kwargs)  # noqa: E501
def post_daily_summary_notification_with_http_info(self, target_id, notification_dto, **kwargs):  # noqa: E501
    """Create a daily summary notification  # noqa: E501

    Receive a notification for your account status on the configured days and at the configured time. Please make sure to pass your timezone or adjust for UTC. This example notifies you every day at 12:03 UTC   ```json {   \"type\": \"DailySummaryNotification\",   \"accountId\": 0,   \"daysOfWeek\": [     'MONDAY',     'TUESDAY',     'WEDNESDAY',     'THURSDAY',     'FRIDAY',     'SATURDAY',     'SUNDAY'   ],   \"timeOfDay\": \"12:03Z\" } ```  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.post_daily_summary_notification_with_http_info(target_id, notification_dto, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to create the notification (required)
    :param DailySummaryNotification notification_dto: The daily summary notification to create (required)
    :return: DailySummaryNotification
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the transport-control kwargs accepted by every method.
    all_params = ['target_id', 'notification_dto']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_daily_summary_notification" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'target_id' is set
    if ('target_id' not in params or
            params['target_id'] is None):
        raise ValueError("Missing the required parameter `target_id` when calling `post_daily_summary_notification`")  # noqa: E501
    # verify the required parameter 'notification_dto' is set
    if ('notification_dto' not in params or
            params['notification_dto'] is None):
        raise ValueError("Missing the required parameter `notification_dto` when calling `post_daily_summary_notification`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'target_id' in params:
        path_params['targetId'] = params['target_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # The DTO travels as the JSON request body.
    if 'notification_dto' in params:
        body_params = params['notification_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/notificationtargets/{targetId}/notifications/dailysummarynotification', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DailySummaryNotification',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUGFIX: `async` is a reserved word since Python 3.7, so it may no
        # longer appear as an explicit keyword argument (SyntaxError at parse
        # time). Forwarding it through ** unpacking is behaviorally identical.
        **{'async': params.get('async')})
def post_new_transaction_notification(self, target_id, notification_dto, **kwargs):  # noqa: E501
    """Create a new transaction notification for **targetId**.  # noqa: E501

    The server fires it for every new transaction, optionally filtered by a
    threshold range and/or a **searchKeyword** (leave the optional fields
    blank to receive all transactions).
    Synchronous by default; pass async=True for an asynchronous request,
    in which case a request thread is returned:

    >>> thread = api.post_new_transaction_notification(target_id, notification_dto, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to create the notification (required)
    :param NewTransactionNotification notification_dto: The new transaction notification to create (required)
    :return: NewTransactionNotification
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info* variant already yields either the bare data
    # (sync) or the request thread (async=True), so a single delegation
    # covers both branches.
    return self.post_new_transaction_notification_with_http_info(target_id, notification_dto, **kwargs)  # noqa: E501
def post_new_transaction_notification_with_http_info(self, target_id, notification_dto, **kwargs):  # noqa: E501
    """Create a new transaction notification  # noqa: E501

    Receive a notification for every new transaction, for transactions within a given threshold or that match a **searchKeyword**. To receive all transactions, simply leave the optional fields blank. This example notifies you of every transaction that contains the keyword \"food\" between 20 € and 60 €:  ```json {   \"type\": \"NewTransactionNotification\",   \"accountId\": 0,   \"lowerThreshold\": {     \"value\": 2000,     \"currency\": \"EUR\"   },   \"upperThreshold\": {     \"value\": 6000,     \"currency\": \"EUR\"   },   \"searchKeyword\": \"Rent\" } ```  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.post_new_transaction_notification_with_http_info(target_id, notification_dto, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to create the notification (required)
    :param NewTransactionNotification notification_dto: The new transaction notification to create (required)
    :return: NewTransactionNotification
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is passed
    :raises ValueError: if a required parameter is missing or None
    """
    all_params = ['target_id', 'notification_dto']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge the declared arguments with **kwargs, rejecting anything the
    # endpoint does not understand.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_new_transaction_notification" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'target_id' is set
    if ('target_id' not in params or
            params['target_id'] is None):
        raise ValueError("Missing the required parameter `target_id` when calling `post_new_transaction_notification`")  # noqa: E501
    # verify the required parameter 'notification_dto' is set
    if ('notification_dto' not in params or
            params['notification_dto'] is None):
        raise ValueError("Missing the required parameter `notification_dto` when calling `post_new_transaction_notification`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'target_id' in params:
        path_params['targetId'] = params['target_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The notification DTO travels as the JSON request body.
    body_params = None
    if 'notification_dto' in params:
        body_params = params['notification_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    # FIX: `async` became a reserved word in Python 3.7, so it can no longer
    # be written as a literal keyword argument (`async=...` is a syntax
    # error).  Passing it through **-unpacking is runtime-equivalent on all
    # Python versions.
    return self.api_client.call_api(
        '/notificationtargets/{targetId}/notifications/newtransactionnotification', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='NewTransactionNotification',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def put_notification(self, target_id, notification_id, notification_dto, **kwargs):  # noqa: E501
    """Update notification  # noqa: E501

    Update the notification identified by **notificationId**. The **notificationId** must match the **id** in **notificationDto**. Please note that type depending restrictions from creating a notification also apply here.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.put_notification(target_id, notification_id, notification_dto, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to create the notification (required)
    :param int notification_id: The **notificationId** to update (required)
    :param Notification notification_dto: The notification data to update (required)
    :return: Notification
             If the method is called asynchronously,
             returns the request thread.
    """
    # This wrapper always unwraps the HTTP envelope and returns the payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous path: the delegate returns the request thread.
        return self.put_notification_with_http_info(
            target_id, notification_id, notification_dto, **kwargs)  # noqa: E501
    # Synchronous path: the delegate returns the deserialized data.
    return self.put_notification_with_http_info(
        target_id, notification_id, notification_dto, **kwargs)  # noqa: E501
def put_notification_with_http_info(self, target_id, notification_id, notification_dto, **kwargs):  # noqa: E501
    """Update notification  # noqa: E501

    Update the notification identified by **notificationId**. The **notificationId** must match the **id** in **notificationDto**. Please note that type depending restrictions from creating a notification also apply here.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.put_notification_with_http_info(target_id, notification_id, notification_dto, async=True)
    >>> result = thread.get()

    :param async bool
    :param int target_id: The **targetId** for which to create the notification (required)
    :param int notification_id: The **notificationId** to update (required)
    :param Notification notification_dto: The notification data to update (required)
    :return: Notification
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is passed
    :raises ValueError: if a required parameter is missing or None
    """
    all_params = ['target_id', 'notification_id', 'notification_dto']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge the declared arguments with **kwargs, rejecting anything the
    # endpoint does not understand.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method put_notification" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'target_id' is set
    if ('target_id' not in params or
            params['target_id'] is None):
        raise ValueError("Missing the required parameter `target_id` when calling `put_notification`")  # noqa: E501
    # verify the required parameter 'notification_id' is set
    if ('notification_id' not in params or
            params['notification_id'] is None):
        raise ValueError("Missing the required parameter `notification_id` when calling `put_notification`")  # noqa: E501
    # verify the required parameter 'notification_dto' is set
    if ('notification_dto' not in params or
            params['notification_dto'] is None):
        raise ValueError("Missing the required parameter `notification_dto` when calling `put_notification`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'target_id' in params:
        path_params['targetId'] = params['target_id']  # noqa: E501
    if 'notification_id' in params:
        path_params['notificationId'] = params['notification_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The notification DTO travels as the JSON request body.
    body_params = None
    if 'notification_dto' in params:
        body_params = params['notification_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    # FIX: `async` became a reserved word in Python 3.7, so it can no longer
    # be written as a literal keyword argument (`async=...` is a syntax
    # error).  Passing it through **-unpacking is runtime-equivalent on all
    # Python versions.
    return self.api_client.call_api(
        '/notificationtargets/{targetId}/notifications/{notificationId}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Notification',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
| 48.172571
| 601
| 0.637423
| 4,685
| 42,151
| 5.517183
| 0.057844
| 0.041164
| 0.037914
| 0.022284
| 0.965452
| 0.963092
| 0.961815
| 0.955857
| 0.951795
| 0.949126
| 0
| 0.015978
| 0.269484
| 42,151
| 874
| 602
| 48.227689
| 0.823461
| 0.058623
| 0
| 0.811839
| 0
| 0
| 0.227622
| 0.06558
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.008457
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e589944bc37682f4c2fa1b40239ab36f4f76838f
| 8,995
|
py
|
Python
|
multiagent_powergrid/networks/imped2seq.py
|
hepengli/multiagent-powergrid
|
f4a5e78a36f57e583fce1c51b63904b75bde5163
|
[
"MIT"
] | null | null | null |
multiagent_powergrid/networks/imped2seq.py
|
hepengli/multiagent-powergrid
|
f4a5e78a36f57e583fce1c51b63904b75bde5163
|
[
"MIT"
] | null | null | null |
multiagent_powergrid/networks/imped2seq.py
|
hepengli/multiagent-powergrid
|
f4a5e78a36f57e583fce1c51b63904b75bde5163
|
[
"MIT"
] | null | null | null |
import numpy as np
# Limit numpy's printed precision to 7 significant digits for the
# sequence-parameter reports produced below.
np.set_printoptions(precision=7)
def imped2seq(Z_abc):
    """Convert a 3x3 phase-frame (abc) matrix to sequence components (012).

    Applies the similarity transform ``T^-1 . Z_abc . T`` where ``T`` is the
    symmetrical-component (Fortescue) matrix built from the rotation operator
    ``alpha = 1 at 120 degrees``.  Works for impedance and susceptance
    matrices alike.
    """
    alpha = np.exp(1j * np.deg2rad(120))
    T = np.array([
        [1., 1., 1.],
        [1., alpha ** 2, alpha],
        [1., alpha, alpha ** 2],
    ])
    return np.linalg.inv(T).dot(Z_abc).dot(T)
# ---------------------------------------------------------------------------
# Line-configuration data.
#
# For every configuration: the 3x3 phase impedance matrix Z_abc in ohms/mile
# and the 3x3 shunt susceptance matrix B_abc in micro Siemens/mile.
# All-zero rows/columns mark phases that are absent from a configuration.
#
# NOTE(fix): in the original script the three rows of Config 5's B_abc were
# written without separating commas, so Python parsed them as a subscript
# expression (`[...][...]`) and the script crashed with a TypeError before
# reporting Config 5.  The rows are comma-separated here, and the thirteen
# copy-pasted report sections are folded into one data-driven loop that
# prints exactly the same lines in the same order.
# ---------------------------------------------------------------------------
MILE_PER_KM = 1.609344  # statute miles per kilometre (per-mile -> per-km)

CONFIGS = [
    # (label, Z_abc [ohms/mile], B_abc [micro Siemens/mile])
    ('300',
     [[1.3368 + 1.3343j, 0.2101 + 0.5779j, 0.2130 + 0.5015j],
      [0.2101 + 0.5779j, 1.3238 + 1.3569j, 0.2066 + 0.4591j],
      [0.2130 + 0.5015j, 0.2066 + 0.4591j, 1.3294 + 1.3471j]],
     [[5.3350, -1.5313, -0.9943],
      [-1.5313, 5.0979, -0.6212],
      [-0.9943, -0.6212, 4.8880]]),
    ('1',
     [[0.4576 + 1.0780j, 0.1560 + 0.5017j, 0.1535 + 0.3849j],
      [0.1560 + 0.5017j, 0.4666 + 1.0482j, 0.1580 + 0.4236j],
      [0.1535 + 0.3849j, 0.1580 + 0.4236j, 0.4615 + 1.0651j]],
     [[5.6765, -1.8319, -0.6982],
      [-1.8319, 5.9809, -1.1645],
      [-0.6982, -1.1645, 5.3971]]),
    ('2',
     [[0.4666 + 1.0482j, 0.1580 + 0.4236j, 0.1560 + 0.5017j],
      [0.1580 + 0.4236j, 0.4615 + 1.0651j, 0.1535 + 0.3849j],
      [0.1560 + 0.5017j, 0.1535 + 0.3849j, 0.4576 + 1.0780j]],
     [[5.9809, -1.1645, -1.8319],
      [-1.1645, 5.3971, -0.6982],
      [-1.8319, -0.6982, 5.6765]]),
    ('3',
     [[0.4615 + 1.0651j, 0.1535 + 0.3849j, 0.1580 + 0.4236j],
      [0.1535 + 0.3849j, 0.4576 + 1.0780j, 0.1560 + 0.5017j],
      [0.1580 + 0.4236j, 0.1560 + 0.5017j, 0.4666 + 1.0482j]],
     [[5.3971, -0.6982, -1.1645],
      [-0.6982, 5.6765, -1.8319],
      [-1.1645, -1.8319, 5.9809]]),
    ('4',
     [[0.4615 + 1.0651j, 0.1580 + 0.4236j, 0.1535 + 0.3849j],
      [0.1580 + 0.4236j, 0.4666 + 1.0482j, 0.1560 + 0.5017j],
      [0.1535 + 0.3849j, 0.1560 + 0.5017j, 0.4576 + 1.0780j]],
     [[5.3971, -1.1645, -0.6982],
      [-1.1645, 5.9809, -1.8319],
      [-0.6982, -1.8319, 5.6765]]),
    ('5',  # B_abc row commas restored (see NOTE above)
     [[0.4666 + 1.0482j, 0.1560 + 0.5017j, 0.1580 + 0.4236j],
      [0.1560 + 0.5017j, 0.4576 + 1.0780j, 0.1535 + 0.3849j],
      [0.1580 + 0.4236j, 0.1535 + 0.3849j, 0.4615 + 1.0651j]],
     [[5.9809, -1.8319, -1.1645],
      [-1.8319, 5.6765, -0.6982],
      [-1.1645, -0.6982, 5.3971]]),
    ('6',
     [[0.4576 + 1.0780j, 0.1535 + 0.3849j, 0.1560 + 0.5017j],
      [0.1535 + 0.3849j, 0.4615 + 1.0651j, 0.1580 + 0.4236j],
      [0.1560 + 0.5017j, 0.1580 + 0.4236j, 0.4666 + 1.0482j]],
     [[5.6765, -0.6982, -1.8319],
      [-0.6982, 5.3971, -1.1645],
      [-1.8319, -1.1645, 5.9809]]),
    ('7',  # phase b absent
     [[0.4576 + 1.0780j, 0j, 0.1535 + 0.3849j],
      [0j, 0j, 0j],
      [0.1535 + 0.3849j, 0j, 0.4615 + 1.0651j]],
     [[5.1154, 0.0, -1.0549],
      [0.0, 0.0, 0.0],
      [-1.0549, 0.0, 5.1704]]),
    ('8',  # phase c absent
     [[0.4576 + 1.0780j, 0.1535 + 0.3849j, 0j],
      [0.1535 + 0.3849j, 0.4615 + 1.0651j, 0j],
      [0j, 0j, 0j]],
     [[5.1154, -1.0549, 0.0],
      [-1.0549, 5.1704, 0.0],
      [0.0, 0.0, 0.0]]),
    ('9',  # single-phase, phase a
     [[1.3292 + 1.3475j, 0j, 0j],
      [0j, 0j, 0j],
      [0j, 0j, 0j]],
     [[4.5193, 0.0, 0.0],
      [0.0, 0.0, 0.0],
      [0.0, 0.0, 0.0]]),
    ('10',  # single-phase, phase b
     [[0j, 0j, 0j],
      [0j, 1.3292 + 1.3475j, 0j],
      [0j, 0j, 0j]],
     [[0.0, 0.0, 0.0],
      [0.0, 4.5193, 0.0],
      [0.0, 0.0, 0.0]]),
    ('11',  # single-phase, phase c
     [[0j, 0j, 0j],
      [0j, 0j, 0j],
      [0j, 0j, 1.3292 + 1.3475j]],
     [[0.0, 0.0, 0.0],
      [0.0, 0.0, 0.0],
      [0.0, 0.0, 4.5193]]),
    ('12',
     [[1.5209 + 0.7521j, 0.5198 + 0.2775j, 0.4924 + 0.2157j],
      [0.5198 + 0.2775j, 1.5329 + 0.7162j, 0.5198 + 0.2775j],
      [0.4924 + 0.2157j, 0.5198 + 0.2775j, 1.5209 + 0.7521j]],
     [[67.2242, 0.0, 0.0],
      [0.0, 67.2242, 0.0],
      [0.0, 0.0, 67.2242]]),
]

# Convert each configuration to sequence components and report the
# positive-sequence (index 1) and zero-sequence (index 0) parameters per km,
# in the same order and format as the original per-config sections.
for label, Z_abc, B_abc in CONFIGS:
    Z_abc = np.array(Z_abc)
    B_abc = np.array(B_abc)
    Z_012 = imped2seq(Z_abc) / MILE_PER_KM  # ohms/km
    B_012 = imped2seq(B_abc) / MILE_PER_KM  # micro Siemens/km
    print('c1: {}'.format(B_012[1, 1].real))
    print('r1: {}'.format(Z_012[1, 1].real))
    print('x1: {}'.format(Z_012[1, 1].imag))
    print('c0: {}'.format(B_012[0, 0].real))
    print('r0: {}'.format(Z_012[0, 0].real))
    print('x0: {}'.format(Z_012[0, 0].imag))
| 32.948718
| 59
| 0.595998
| 1,869
| 8,995
| 2.783307
| 0.05725
| 0.103806
| 0.066897
| 0.103806
| 0.939254
| 0.915609
| 0.899077
| 0.861784
| 0.861784
| 0.861784
| 0
| 0.338679
| 0.126515
| 8,995
| 272
| 60
| 33.069853
| 0.323406
| 0.052918
| 0
| 0.709016
| 0
| 0
| 0.055208
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004098
| false
| 0
| 0.004098
| 0
| 0.012295
| 0.32377
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e5eb72a27afa5466ec9629b9d9f6697b1fb44735
| 9,506
|
py
|
Python
|
constants.py
|
the-aerospace-corporation/counter-reconnaissance-program
|
f87f21990a55b35090cc959f66ca5fdb1e4b5708
|
[
"MIT"
] | 3
|
2020-09-22T23:05:44.000Z
|
2022-02-16T08:39:27.000Z
|
constants.py
|
the-aerospace-corporation/counter-reconnaissance-program
|
f87f21990a55b35090cc959f66ca5fdb1e4b5708
|
[
"MIT"
] | null | null | null |
constants.py
|
the-aerospace-corporation/counter-reconnaissance-program
|
f87f21990a55b35090cc959f66ca5fdb1e4b5708
|
[
"MIT"
] | 1
|
2021-03-29T21:42:20.000Z
|
2021-03-29T21:42:20.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright © 2020 The Aerospace Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
# OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Nmap constants"""
BUFFER_SIZE = 16384 # arbitrary size, probably needs something else
# Maps a raw probe payload (the exact bytes sent on the wire) to the
# human-readable probe name.  Payloads mirror the PROBE_* constants defined
# further down in this module.
NMAP_PROBES = {
    b'\r\n\r\n': "Generic Lines",
    b'GET / HTTP/1.0\r\n\r\n': "Get Request",
    b'OPTIONS / HTTP/1.0\r\n\r\n': "HTTP Options",
    b'OPTIONS / RTSP/1.0\r\n\r\n': "RTSP Request",
    b'\x80\x00\x00(r\xfe\x1d\x13\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x86\xa0\x00\x01\x97|\x00\x00\x00\x00\x00\x00'
    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00': "TCP RPC Check",
    # The trailing space below is preserved from the original implicit
    # string concatenation ("TCP DNS " "Version Bind" " Request ").
    b'\x00\x1e\x00\x06\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07version\x04bind\x00\x00\x10\x00\x03':
        "TCP DNS Version Bind Request ",
}
# ---------------------------------------------------------------------------
# Individual Nmap service-detection probe payloads, as raw bytes.
# The "# Missing ..." comments track probes from nmap-service-probes that
# have not been transcribed into this module yet.
# ---------------------------------------------------------------------------
PROBE_GENERIC_LINES = b'\r\n\r\n'
PROBE_GET_REQUEST = b'GET / HTTP/1.0\r\n\r\n'
PROBE_HTTP_OPTIONS = b'OPTIONS / HTTP/1.0\r\n\r\n'
PROBE_RTSP_REQUEST = b'OPTIONS / RTSP/1.0\r\n\r\n'
PROBE_TCP_RPC_CHECK = b'\x80\x00\x00(r\xfe\x1d\x13\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x86\xa0\x00\x01\x97|\x00' \
                      b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
# Missing equivalent UDP probe
PROBE_TCP_DNS_VERSION_BIND_REQ = b'\x00\x1e\x00\x06\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07version\x04bind\x00' \
                                 b'\x00\x10\x00\x03'
# Missing equivalent UDP probe
PROBE_TCP_DNS_STATUS_REQUEST = b'\x00\x0c\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00'
# NOTE(review): the payload is the SMTP `EHLO` command although the constant
# is named HELLO — renaming would break existing callers, so it is kept.
PROBE_HELLO = b'EHLO\r\n'
# Missing equivalent UDP probe
PROBE_TCP_HELP = b'HELP\r\n'
PROBE_SSL_SESSION_REQ = b'\x16\x03\x00\x00S\x01\x00\x00O\x03\x00?G\xd7\xf7\xba,\xee\xea\xb2`~\xf3\x00\xfd\x82{\xb9' \
                        b'\xd5\x96\xc8w\x9b\xe6\xc4\xdb<=\xdbo\xef\x10n\x00\x00(\x00\x16\x00\x13\x00\n\x00f\x00\x05' \
                        b'\x00\x04\x00e\x00d\x00c\x00b\x00a\x00`\x00\x15\x00\x12\x00\t\x00\x14\x00\x11\x00\x08\x00' \
                        b'\x06\x00\x03\x01\x00'
# NOTE(review): "TSL" looks like a typo for "TLS"; the name is kept because
# renaming the public constant would break callers.
PROBE_TSL_SESSION_REQ = b'\x16\x03\x00\x00i\x01\x00\x00e\x03\x03U\x1c\xa7\xe4random1random2random3random4\x00' \
                        b'\x00\x0c\x00/\x00\n\x00\x13\x009\x00\x04\x00\xff\x01\x00\x000\x00\r\x00,\x00*\x00\x01' \
                        b'\x00\x03\x00\x02\x06\x01\x06\x03\x06\x02\x02\x01\x02\x03\x02\x02\x03\x01\x03\x03\x03' \
                        b'\x02\x04\x01\x04\x03\x04\x02\x01\x01\x01\x03\x01\x02\x05\x01\x05\x03\x05\x02'
# Missing SSLv23SessionReq probe
PROBE_KERBEROS = b'\x00\x00\x00qj\x81n0\x81k\xa1\x03\x02\x01\x05\xa2\x03\x02\x01\n\xa4\x81^0\\\xa0\x07\x03' \
                 b'\x05\x00P\x80\x00\x10\xa2\x04\x1b\x02NM\xa3\x170\x15\xa0\x03\x02\x01\x00\xa1\x0e0\x0c\x1b' \
                 b'\x06krbtgt\x1b\x02NM\xa5\x11\x18\x0f19700101000000Z\xa7\x06\x02\x04\x1f\x1e\xb9\xd9\xa8' \
                 b'\x170\x15\x02\x01\x12\x02\x01\x11\x02\x01\x10\x02\x01\x17\x02\x01\x01\x02\x01\x03\x02\x01\x02'
PROBE_SMB_PROG_NEG = b'\0\0\0\xa4\xff\x53\x4d\x42\x72\0\0\0\0\x08\x01\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\x06\0' \
                     b'\0\x01\0\0\x81\0\x02PC NETWORK PROGRAM 1.0\0\x02MICROSOFT NETWORKS 1.03\0\x02MICROSOFT N' \
                     b'ETWORKS 3.0\0\x02LANMAN1.0\0\x02LM1.2X002\0\x02Samba\0\x02NT LANMAN 1.0\0\x02NT LM 0.12\0'
PROBE_X11 = b'l\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00'
PROBE_404_REQ = b'GET /nice%20ports%2C/Tri%6Eity.txt%2ebak HTTP/1.0\r\n\r\n'
PROBE_LPD_STRING = b'\x01default\n'
# Missing equivalent UDP probe
PROBE_TCP_LDAP_SEARCH_REQ = b'0\x84\x00\x00\x00-\x02\x01\x07c\x84\x00\x00\x00$\x04\x00\n\x01\x00\n\x01\x00\x02\x01' \
                            b'\x00\x02\x01d\x01\x01\x00\x87\x0bobjectClass0\x84\x00\x00\x00\x00'
PROBE_LDAP_BIND_REQ = b'0\x0c\x02\x01\x01`\x07\x02\x01\x02\x04\x00\x80\x00'
# Missing equivalent UDP probe
PROBE_TCP_SIP_OPTIONS = b'OPTIONS sip:nm SIP/2.0\r\nVia: SIP/2.0/TCP nm;branch=foo\r\nFrom: <sip:nm@nm>;tag=root\r\n' \
                        b'To: <sip:nm2@nm2>\r\nCall-ID: 50000\r\nCSeq: 42 OPTIONS\r\nMax-Forwards: 70\r\nContent-Length: ' \
                        b'0\r\nContact: <sip:nm@nm>\r\nAccept: application/sdp\r\n\r\n'
PROBE_LANDESK_RC = b'TNMP\x04\x00\x00\x00TNME\x00\x00\x04\x00'
# Missing TerminalServerCookie
PROBE_TERMINAL_SERVER = b'\x03\x00\x00\x0b\x06\xe0\x00\x00\x00\x00\x00'
PROBE_NCP = b'DmdT\x00\x00\x00\x17\x00\x00\x00\x01\x00\x00\x00\x00\x11\x11\x00\xff\x01\xff\x13'
PROBE_NOTES_RPC = b':\x00\x00\x00/\x00\x00\x00\x02\x00\x00@\x02\x0f\x00\x01\x00=\x05\x00\x00\x00\x00\x00\x00\x00\x00' \
                  b'\x00\x00\x00\x00/\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                  b'\x00\x00\x00\x00\x00\x00\x00\x00'
# Missing DistCCD
PROBE_JAVA_RMI = b'JRMI\x00\x02K'
# Missing Radmin
# Missing UDP Sqlping
# Missing UDP NTPRequest
# Missing NessusTPv12
# Missing NessusTPv11
# Missing NessusTPv10
# Missing UDP SNMPv1public
# Missing UDP SNMPv3GetRequest
PROBE_WMSRequest = b'\x01\x00\x00\xfd\xce\xfa\x0b\xb0\xa0\x00\x00\x00MMS\x14\x00\x00\x00\x00\x00\x00\x00\x00' \
                   b'\x00\x00\x00\x00\x00\x00\x00\x12\x00\x00\x00\x01\x00\x03\x00\xf0\xf0\xf0\xf0\x0b\x00\x04' \
                   b'\x00\x1c\x00\x03\x00N\x00S\x00P\x00l\x00a\x00y\x00e\x00r\x00/\x009\x00.\x000\x00.\x000' \
                   b'\x00.\x002\x009\x008\x000\x00;\x00 \x00{\x000\x000\x000\x000\x00A\x00A\x000\x000\x00-\x000' \
                   b'\x00A\x000\x000\x00-\x000\x000\x00a\x000\x00-\x00A\x00A\x000\x00A\x00-\x000\x000\x000\x000' \
                   b'\x00A\x000\x00A\x00A\x000\x00A\x00A\x000\x00}\x00\x00\x00\xe0m\xdf_'
PROBE_ORACLE_TNS = b'\x00Z\x00\x00\x01\x00\x00\x00\x016\x01,\x00\x00\x08\x00\x7f\xff\x7f\x08\x00\x00\x00\x01' \
                   b'\x00 \x00:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x004\xe6\x00\x00' \
                   b'\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00(CONNECT_DATA=(COMMAND=version))'
# Missing UDP xdmcp
# Missing UDP AFSVersionRequest
# Missing OfficeScan
PROBE_MS_SQL_S = b'\x12\x01\x004\x00\x00\x00\x00\x00\x00\x15\x00\x06\x01\x00\x1b\x00\x01\x02\x00\x1c\x00\x0c\x03\x00' \
                 b'(\x00\x04\xff\x08\x00\x01U\x00\x00\x00MSSQLServer\x00H\x0f\x00\x00'
# Missing HELP4STOMP
# Missing Memcache
# Missing beast2
# Missing firebird
# Missing ibm-db2-das
# Missing ibm_db2
# Missing pervasive-relational
# Missing pervasive-btrieve
# Missing UDP ibm-db2-das-udp
# Missing ajp
# Missing UDP DNS-SD
# Missing hp-pjl
# Missing UDP Citrix
# Missing UDP Kerberos
# Missing UDP SqueezeCenter
PROBE_AFP = b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x0f\x00'
# Missing UDP Quake1_server_info
# Missing UDP Quake2_status
# Missing UDP Quake3_getstatus
# Missing UDP Quake3_master_getservers
# Missing SqueezeCenter_CLI
# Missing Arucer
# Missing UDP serialnumberd
# Missing dominoconsole
# Missing informix
# Missing drda
# Missing ibm-mqseries
# Missing apple-iphoto
# Missing ZendJavaBridge
# Missing UDP BackOrifice
# Missing gkrellm
# Missing metasploit-xmlrpc
# Missing mongodb
# Missing UDP sybaseanywhere
# Missing UDP vuze-dht
# Missing UDP pc-anywhere
# Missing UDP pc-duo
# Missing UDP pc-duo-gw
# Missing redis-server
# Missing UDP memcached
# Missing riak-pbc
# Missing tarantool
# Missing couchbase-data
# Missing epmd
# Missing vp3
# Missing kumo-server
# Missing metasploit-msgrpc
# Missing UDP svrloc
# Missing hazelcast-http
# Missing minecraft-ping
# Missing erlang-node
# Missing UDP Murmur
# Missing UDP Ventrilo
# Missing teamspeak-tcpquery-ver
# Missing UDP TeamSpeak2
# Missing UDP TeamSpeak3
# Missing xmlsysd
# Missing UDP FreelancerStatus
# Missing UDP AndroMouse
# Missing AirHID
# Missing UDP NetMotionMobility
# Missing docker
# Missing tor-versions
# Missing TLS-PSK
# Missing NJE
# Missing tn3270
PROBE_GIOP = b'GIOP\x01\x00\x01\x00$\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00' \
             b'\x00abcdef\x00\x00\x04\x00\x00\x00get\x00\x00\x00\x00\x00'
# Missing OpenVPN
# Missing UDP OpenVPN
# Missing pcworx
# Missing proconos
# Missing niagara-fox
# Missing mqtt
# Missing UDP ipmi-rmcp
# Missing UDP coap-request
# Missing UDP DTLSSessionReq
# Missing iperf3
# Missing UDP QUIC
# Missing VersionRequest
# Missing NoMachine
# Missing JMON
# Missing LibreOfficeImpressSCPair
# Missing UDP ARD
# Missing LSCP
# Missing rotctl
# Missing SharpTV
| 47.768844
| 120
| 0.68704
| 1,639
| 9,506
| 3.938377
| 0.277608
| 0.217506
| 0.239814
| 0.24725
| 0.230984
| 0.201549
| 0.178931
| 0.144384
| 0.140511
| 0.112626
| 0
| 0.205816
| 0.164317
| 9,506
| 198
| 121
| 48.010101
| 0.606621
| 0.350095
| 0
| 0
| 0
| 0.691176
| 0.690732
| 0.600529
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e5f4df3e6160530aa12ba00d25ab011311bfad47
| 13,161
|
py
|
Python
|
ciphers/bat_lat.py
|
bat-team/BAT-Trail-Search
|
82f8b61fa3b434fb90da12b0273596ab272f51e6
|
[
"MIT"
] | null | null | null |
ciphers/bat_lat.py
|
bat-team/BAT-Trail-Search
|
82f8b61fa3b434fb90da12b0273596ab272f51e6
|
[
"MIT"
] | null | null | null |
ciphers/bat_lat.py
|
bat-team/BAT-Trail-Search
|
82f8b61fa3b434fb90da12b0273596ab272f51e6
|
[
"MIT"
] | null | null | null |
'''
Linear approximation table (LAT) data used by the BAT trail search.

Created on Mar 1, 2019
@author: Shawn
'''
LAT = [
(8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, -2, 2, 0, 0, 2, 2, 0, 4, 2, -2, 0, 0, -2, -2, 0, 0, 4, 4, 0, 0, 4, -4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 4, -2, 2, 0, 0, 2, 2, 0, -4, -2, 2, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -2, 2, 4, 0, -2, -2, 0, 4, 0, 0, 0, 4, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, 2, -2, -4, 0, 2, 2, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, -4, 0, 0, 0, 4, 0, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, 0, 2, 0, -2, 4, -2, 0, 2, 2, 0, 0, -2, -2, 0, 4, 2, -2, 0, 0, 2, -2, 4, 0, -2, 0, 2, 0, -2, 0, 2, 4, -2, 4, 2, 0, 2, 0, -2, 0, 2, 2, 0, 0, -2, -2, 0, -4, 2, 2, 4, 0, 2, 2, 0, 0, -2, 2, 0, 0, 2, -2, 0, 0, 2, 2, -4, 4, 2, 2, 0, 0, -2, -4, 2, 4, 2, 0, -2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, -2, 0, 0, -2, 2, 0, 0, -2, 2, 0, 4, -2, 2, 4, 0, -2, 0, -2, -4, 2, 4, 2, 0, 2, 0, 2, 0, 2, 0, 2),
(0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 2, -2, 0, 0, 2, 2, 4, 0, -2, 2, 0, 0, -2, -2, 0, 0, 4, 4, 0, 0, -4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 4, 0, 2, -2, 0, 0, 2, 2, -4, 0, 2, -2, 0, 0, -2, 2, 0, 4, 2, 2, 0, 0, 2, -2, 0, 4, -2, -2, 4, 0, 0, 0, 4, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 0, 0, -2, 2, 0, 4, 2, 2, 0, 0, -2, 2, 0, -4, 2, 2, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, -4, 0, 0, 0, 4, 2, 0, -2, 0, 2, 0, -2, 0, 2, 4, 2, 0, -2, 0, -2, 4, 2, 0, 0, 2, -2, 0, 0, -2, 2, 4, 0, -2, 2, 0, 4, -2, -2, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, 4, 2, 0, -2, 0, 2, 0, 0, 2, -2, 0, 0, -2, 2, -4, 4, 2, 2, 0, 0, 2, -2, 0, 0, 2, 2, 0, 0, -2, 2, 0, -4, 2, 2, 4, 0, 2, -2, 0, 2, -4, 2, 4, -2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 0, -2, -2, 0, 0, 2, -2, 0, 0, 2, -2, 4, 4, 2, -2, 0, -2, 0, 2, -4, 2, 4, 2, 0, 2, 0, 2, 0, 2, 0),
(0, 0, 4, 4, 0, 0, 4, -4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 4, -2, 2, 0, 0, 2, 2, 0, -4, -2, 2, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, -2, 2, 0, 0, 2, 2, 0, 4, 2, -2, 0, 0, -2, -2, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, 2, -2, -4, 0, 2, 2, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -2, 2, 4, 0, -2, -2, 0, 4, 0, 0, 0, 4, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, -2, 0, 2, 0, -2, 0, 2, 4, -2, 4, 2, 0, 2, 0, -2, 0, 2, 2, 0, 0, -2, -2, 0, -4, 2, 2, 4, 0, 2, 2, 0, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, 0, 2, 0, -2, 4, -2, 0, 2, 2, 0, 0, -2, -2, 0, 4, 2, -2, 0, 0, 2, -2, 4, 0, 2, -2, 0, 0, -2, 2, 0, 0, -2, 2, 0, 4, -2, 2, 4, 0, -2, 0, -2, -4, 2, 4, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, 2, 0, 0, 2, -2, 0, 0, 2, 2, -4, 4, 2, 2, 0, 0, -2, -4, 2, 4, 2, 0, -2, 0, 2, 0, 2, 0, 2, 0, 2),
(0, 0, 4, 4, 0, 0, -4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 4, 0, 2, -2, 0, 0, 2, 2, -4, 0, 2, -2, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 2, -2, 0, 0, 2, 2, 4, 0, -2, 2, 0, 0, -2, -2, 0, 0, -2, 2, 0, 4, 2, 2, 0, 0, -2, 2, 0, -4, 2, 2, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, -2, 2, 0, 4, 2, 2, 0, 0, 2, -2, 0, 4, -2, -2, 4, 0, 0, 0, 4, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, -2, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, 4, 2, 0, -2, 0, 2, 0, 0, 2, -2, 0, 0, -2, 2, -4, 4, 2, 2, 0, 0, 2, 2, 0, -2, 0, 2, 0, -2, 0, 2, 4, 2, 0, -2, 0, -2, 4, 2, 0, 0, 2, -2, 0, 0, -2, 2, 4, 0, -2, 2, 0, 4, -2, 2, 0, 0, -2, -2, 0, 0, 2, -2, 0, 0, 2, -2, 4, 4, 2, -2, 0, -2, 0, 2, -4, 2, 4, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, 0, 2, 2, 0, 0, -2, 2, 0, -4, 2, 2, 4, 0, 2, -2, 0, 2, -4, 2, 4, -2, 0, 2, 0, 2, 0, 2, 0, 2, 0),
(0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -2, 2, 4, 0, -2, -2, 0, 4, 0, 0, 0, 4, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, 2, -2, -4, 0, 2, 2, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, -4, 0, 0, 0, 4, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, -2, 2, 0, 0, 2, 2, 0, 4, 2, -2, 0, 0, -2, -2, 0, 0, 4, 4, 0, 0, 4, -4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 4, -2, 2, 0, 0, 2, 2, 0, -4, -2, 2, 0, -2, 2, 0, 0, 2, -2, 0, 0, 2, 2, -4, 4, 2, 2, 0, 0, -2, -4, 2, 4, 2, 0, -2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, -2, 0, 0, -2, 2, 0, 0, -2, 2, 0, 4, -2, 2, 4, 0, -2, 0, -2, -4, 2, 4, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, 0, 2, 0, -2, 4, -2, 0, 2, 2, 0, 0, -2, -2, 0, 4, 2, -2, 0, 0, 2, -2, 4, 0, -2, 0, 2, 0, -2, 0, 2, 4, -2, 4, 2, 0, 2, 0, -2, 0, 2, 2, 0, 0, -2, -2, 0, -4, 2, 2, 4, 0, 2, 2, 0),
(0, 0, -2, 2, 0, 4, 2, 2, 0, 0, 2, -2, 0, 4, -2, -2, 4, 0, 0, 0, 4, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 0, 0, -2, 2, 0, 4, 2, 2, 0, 0, -2, 2, 0, -4, 2, 2, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, -4, 0, 0, 0, 4, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 2, -2, 0, 0, 2, 2, 4, 0, -2, 2, 0, 0, -2, -2, 0, 0, 4, 4, 0, 0, -4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 4, 0, 2, -2, 0, 0, 2, 2, -4, 0, 2, -2, -2, 0, 0, 2, 2, 0, 0, -2, 2, 0, -4, 2, 2, 4, 0, 2, -2, 0, 2, -4, 2, 4, -2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 0, -2, -2, 0, 0, 2, -2, 0, 0, 2, -2, 4, 4, 2, -2, 0, -2, 0, 2, -4, 2, 4, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, 2, 0, -2, 0, 2, 4, 2, 0, -2, 0, -2, 4, 2, 0, 0, 2, -2, 0, 0, -2, 2, 4, 0, -2, 2, 0, 4, -2, -2, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, 4, 2, 0, -2, 0, 2, 0, 0, 2, -2, 0, 0, -2, 2, -4, 4, 2, 2, 0, 0, 2),
(0, 0, 2, -2, 4, 0, 2, 2, 0, 0, 2, -2, -4, 0, 2, 2, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -2, 2, 4, 0, -2, -2, 0, 4, 0, 0, 0, 4, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 0, 4, 4, 0, 0, 4, -4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 4, -2, 2, 0, 0, 2, 2, 0, -4, -2, 2, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, -2, 2, 0, 0, 2, 2, 0, 4, 2, -2, 0, 0, -2, -2, 0, 2, -2, 0, 0, -2, 2, 0, 0, -2, 2, 0, 4, -2, 2, 4, 0, -2, 0, -2, -4, 2, 4, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, 2, 0, 0, 2, -2, 0, 0, 2, 2, -4, 4, 2, 2, 0, 0, -2, -4, 2, 4, 2, 0, -2, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, 2, 0, -2, 0, 2, 4, -2, 4, 2, 0, 2, 0, -2, 0, 2, 2, 0, 0, -2, -2, 0, -4, 2, 2, 4, 0, 2, 2, 0, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, 0, 2, 0, -2, 4, -2, 0, 2, 2, 0, 0, -2, -2, 0, 4, 2, -2, 0, 0, 2, -2, 4),
(0, 0, -2, 2, 0, 4, 2, 2, 0, 0, -2, 2, 0, -4, 2, 2, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, -2, 2, 0, 4, 2, 2, 0, 0, 2, -2, 0, 4, -2, -2, 4, 0, 0, 0, 4, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 4, 4, 0, 0, -4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 4, 0, 2, -2, 0, 0, 2, 2, -4, 0, 2, -2, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 2, -2, 0, 0, 2, 2, 4, 0, -2, 2, 0, 0, -2, -2, 2, 0, 0, -2, -2, 0, 0, 2, -2, 0, 0, 2, -2, 4, 4, 2, -2, 0, -2, 0, 2, -4, 2, 4, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, 0, 2, 2, 0, 0, -2, 2, 0, -4, 2, 2, 4, 0, 2, -2, 0, 2, -4, 2, 4, -2, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, 4, 2, 0, -2, 0, 2, 0, 0, 2, -2, 0, 0, -2, 2, -4, 4, 2, 2, 0, 0, 2, 2, 0, -2, 0, 2, 0, -2, 0, 2, 4, 2, 0, -2, 0, -2, 4, 2, 0, 0, 2, -2, 0, 0, -2, 2, 4, 0, -2, 2, 0, 4, -2),
(0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 4, 2, -2, 0, 0, -2, -2, 0, 4, -2, 2, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 4, -4, 0, 0, 2, 2, 0, -4, -2, 2, 0, 0, 2, 2, 0, 4, -2, 2, 0, 0, -2, 2, 4, 0, -2, -2, 0, 0, 2, -2, 4, 0, 2, 2, 0, -4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, 2, -2, -4, 0, 2, 2, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 0, 4, 0, 0, 0, 4, 4, 2, 0, 2, 0, -2, 4, -2, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, -2, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -2, -2, 0, 4, -2, 4, 2, 0, 2, 0, -2, 0, -2, 0, 2, 0, -2, 0, 2, -4, 2, 2, 4, 0, 2, 2, 0, 0, 2, 2, 0, 0, -2, -2, 0, 0, 2, 2, -4, 4, 2, 2, 0, 0, -2, 2, 0, 0, 2, -2, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, -4, 2, 4, 2, 0, -2, 0, -2, 2, 0, 4, -2, 2, 4, 0, 2, -2, 0, 0, -2, 2, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, -2, -4, 2, 4, 2),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 4, 0, -2, 2, 0, 0, -2, -2, 4, 0, 2, -2, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, -4, 4, 0, 0, 2, 2, -4, 0, 2, -2, 0, 0, 2, 2, 4, 0, 2, -2, 0, 0, 2, -2, 0, 4, -2, -2, 0, 0, -2, 2, 0, 4, 2, 2, -4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, -2, 2, 0, -4, 2, 2, 0, 0, -2, 2, 0, 4, 2, 2, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 4, 0, 0, 0, 4, 0, 2, 4, 2, 0, -2, 0, -2, 4, 2, 0, -2, 0, 2, 0, -2, 0, 2, 4, 0, -2, 2, 0, 4, -2, 2, 0, 0, 2, -2, 0, 0, -2, -2, 4, 2, 4, 2, 0, -2, 0, -2, 0, 2, 0, -2, 0, 2, 0, 2, -4, 4, 2, 2, 0, 0, 2, 2, 0, 0, 2, -2, 0, 0, -2, 2, 0, -4, 2, 2, 4, 0, 2, -2, 0, 0, 2, 2, 0, 0, -2, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, 2, -4, 2, 4, -2, 0, -2, 0, 0, 2, -2, 4, 4, 2, 2, 0, 0, -2, -2, 0, 0, 2, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, -2, 0, 2, -4, 2, 4),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 4, -4, 0, 0, 2, 2, 0, -4, -2, 2, 0, 0, 2, 2, 0, 4, -2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 4, 2, -2, 0, 0, -2, -2, 0, 4, -2, 2, 0, 0, 2, 2, 0, 0, 2, -2, -4, 0, 2, 2, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, -2, 2, 4, 0, -2, -2, 0, 0, 2, -2, 4, 0, 2, 2, 0, -4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 4, -2, 4, 2, 0, 2, 0, -2, 0, -2, 0, 2, 0, -2, 0, 2, -4, 2, 2, 4, 0, 2, 2, 0, 0, 2, 2, 0, 0, -2, -2, 0, 4, 2, 0, 2, 0, -2, 4, -2, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, -2, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -2, -2, 0, 0, -2, 2, 0, 4, -2, 2, 4, 0, 2, -2, 0, 0, -2, 2, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, -2, -4, 2, 4, 2, 0, 2, 2, -4, 4, 2, 2, 0, 0, -2, 2, 0, 0, 2, -2, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, -4, 2, 4, 2, 0, -2),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, -4, 4, 0, 0, 2, 2, -4, 0, 2, -2, 0, 0, 2, 2, 4, 0, 2, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 4, 0, -2, 2, 0, 0, -2, -2, 4, 0, 2, -2, 0, 0, 2, 2, 0, 0, -2, 2, 0, -4, 2, 2, 0, 0, -2, 2, 0, 4, 2, 2, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 2, -2, 0, 4, -2, -2, 0, 0, -2, 2, 0, 4, 2, 2, -4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, -2, 4, 2, 4, 2, 0, -2, 0, -2, 0, 2, 0, -2, 0, 2, 0, 2, -4, 4, 2, 2, 0, 0, 2, 2, 0, 0, 2, -2, 0, 0, -2, 2, 4, 2, 0, -2, 0, -2, 4, 2, 0, -2, 0, 2, 0, -2, 0, 2, 4, 0, -2, 2, 0, 4, -2, 2, 0, 0, 2, -2, 0, 0, -2, -2, 0, 0, 2, -2, 4, 4, 2, 2, 0, 0, -2, -2, 0, 0, 2, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, -2, 0, 2, -4, 2, 4, 2, 0, -4, 2, 2, 4, 0, 2, -2, 0, 0, 2, 2, 0, 0, -2, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, 2, -4, 2, 4, -2, 0),
(0, 0, -2, 2, 4, 0, -2, -2, 0, 0, 2, -2, 4, 0, 2, 2, 0, -4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, 2, -2, -4, 0, 2, 2, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 4, 2, -2, 0, 0, -2, -2, 0, 4, -2, 2, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 4, -4, 0, 0, 2, 2, 0, -4, -2, 2, 0, 0, 2, 2, 0, 4, -2, 2, 0, 2, 2, -4, 4, 2, 2, 0, 0, -2, 2, 0, 0, 2, -2, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, -4, 2, 4, 2, 0, -2, 0, -2, 2, 0, 4, -2, 2, 4, 0, 2, -2, 0, 0, -2, 2, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, -2, -4, 2, 4, 2, 4, 2, 0, 2, 0, -2, 4, -2, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, -2, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -2, -2, 0, 4, -2, 4, 2, 0, 2, 0, -2, 0, -2, 0, 2, 0, -2, 0, 2, -4, 2, 2, 4, 0, 2, 2, 0, 0, 2, 2, 0, 0, -2, -2, 0),
(0, 0, 2, -2, 0, 4, -2, -2, 0, 0, -2, 2, 0, 4, 2, 2, -4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, -2, 2, 0, -4, 2, 2, 0, 0, -2, 2, 0, 4, 2, 2, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 4, 0, -2, 2, 0, 0, -2, -2, 4, 0, 2, -2, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, -4, 4, 0, 0, 2, 2, -4, 0, 2, -2, 0, 0, 2, 2, 4, 0, 2, -2, 2, 0, -4, 2, 2, 4, 0, 2, -2, 0, 0, 2, 2, 0, 0, -2, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, 2, -4, 2, 4, -2, 0, -2, 0, 0, 2, -2, 4, 4, 2, 2, 0, 0, -2, -2, 0, 0, 2, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, -2, 0, 2, -4, 2, 4, 2, 4, 2, 0, -2, 0, -2, 4, 2, 0, -2, 0, 2, 0, -2, 0, 2, 4, 0, -2, 2, 0, 4, -2, 2, 0, 0, 2, -2, 0, 0, -2, -2, 4, 2, 4, 2, 0, -2, 0, -2, 0, 2, 0, -2, 0, 2, 0, 2, -4, 4, 2, 2, 0, 0, 2, 2, 0, 0, 2, -2, 0, 0, -2),
(0, 0, 2, -2, -4, 0, 2, 2, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -4, 0, 0, 0, 4, 0, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, -2, 2, 4, 0, -2, -2, 0, 0, 2, -2, 4, 0, 2, 2, 0, -4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 4, -4, 0, 0, 2, 2, 0, -4, -2, 2, 0, 0, 2, 2, 0, 4, -2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 4, 2, -2, 0, 0, -2, -2, 0, 4, -2, 2, 0, 0, 2, 2, 0, -2, 2, 0, 4, -2, 2, 4, 0, 2, -2, 0, 0, -2, 2, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, -2, -4, 2, 4, 2, 0, 2, 2, -4, 4, 2, 2, 0, 0, -2, 2, 0, 0, 2, -2, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, -2, -4, 2, 4, 2, 0, -2, 4, -2, 4, 2, 0, 2, 0, -2, 0, -2, 0, 2, 0, -2, 0, 2, -4, 2, 2, 4, 0, 2, 2, 0, 0, 2, 2, 0, 0, -2, -2, 0, 4, 2, 0, 2, 0, -2, 4, -2, 0, 2, 0, -2, 0, 2, 0, -2, 4, 2, -2, 0, 0, 2, -2, 4, 0, 2, 2, 0, 0, -2, -2, 0),
(0, 0, -2, 2, 0, -4, 2, 2, 0, 0, -2, 2, 0, 4, 2, 2, 0, 0, 0, -4, 0, 0, 0, 4, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 2, -2, 0, 4, -2, -2, 0, 0, -2, 2, 0, 4, 2, 2, -4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, -4, 4, 0, 0, 2, 2, -4, 0, 2, -2, 0, 0, 2, 2, 4, 0, 2, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 4, 0, -2, 2, 0, 0, -2, -2, 4, 0, 2, -2, 0, 0, 2, 2, -2, 0, 0, 2, -2, 4, 4, 2, 2, 0, 0, -2, -2, 0, 0, 2, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, -2, 0, 2, -4, 2, 4, 2, 0, -4, 2, 2, 4, 0, 2, -2, 0, 0, 2, 2, 0, 0, -2, 2, 0, 2, 0, 2, 0, 2, 0, -2, 0, 2, -4, 2, 4, -2, 0, -2, 4, 2, 4, 2, 0, -2, 0, -2, 0, 2, 0, -2, 0, 2, 0, 2, -4, 4, 2, 2, 0, 0, 2, 2, 0, 0, 2, -2, 0, 0, -2, 2, 4, 2, 0, -2, 0, -2, 4, 2, 0, -2, 0, 2, 0, -2, 0, 2, 4, 0, -2, 2, 0, 4, -2, 2, 0, 0, 2, -2, 0, 0, -2),
]
| 548.375
| 818
| 0.313578
| 4,104
| 13,161
| 1.005604
| 0.002924
| 0.448752
| 0.343106
| 0.303368
| 0.992488
| 0.992488
| 0.992488
| 0.992488
| 0.992488
| 0.992488
| 0
| 0.456326
| 0.317149
| 13,161
| 23
| 819
| 572.217391
| 0.002893
| 0.002811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
f9200221b33cbcd9e64290faad6daecaedce1733
| 32,505
|
py
|
Python
|
pygdpr/models/dpa/greece/__init__.py
|
GDPRxiv/crawler
|
178ef9ff6c3641ba8b761a49e42c2579e453c1ca
|
[
"MIT"
] | null | null | null |
pygdpr/models/dpa/greece/__init__.py
|
GDPRxiv/crawler
|
178ef9ff6c3641ba8b761a49e42c2579e453c1ca
|
[
"MIT"
] | 2
|
2022-02-19T06:56:03.000Z
|
2022-02-19T07:00:00.000Z
|
pygdpr/models/dpa/greece/__init__.py
|
GDPRxiv/crawler
|
178ef9ff6c3641ba8b761a49e42c2579e453c1ca
|
[
"MIT"
] | null | null | null |
import os
import math
import socket
import sys
import requests
import json
import datetime
import hashlib
import dateparser
import requests.exceptions
import urllib3.exceptions
from pygdpr.models.dpa import DPA
from bs4 import BeautifulSoup
from pygdpr.services.filename_from_path_service import filename_from_path_service
from pygdpr.services.pdf_to_text_service import PDFToTextService
from pygdpr.specifications import pdf_file_extension_specification
from pygdpr.specifications.should_retain_document_specification import ShouldRetainDocumentSpecification
from pygdpr.models.common.pagination import Pagination
from pygdpr.policies.gdpr_policy import GDPRPolicy
import textract
class Greece(DPA):
def __init__(self, path=os.curdir):
country_code='GR'
super().__init__(country_code, path)
def update_pagination(self, pagination=None, page_soup=None, driver=None, start_path=None):
source = {
"host": "https://www.dpa.gr"
# "start_path": "/portal/page?_pageid=33,43547&_dad=portal&_schema=PORTAL"
#"start_path": "/el/enimerwtiko/prakseisArxis?field_year_from=2018&field_year_to=&field_category=239&field_thematic=All&field_protocol_number=&field_keywords=&page=0"
}
host = source['host']
if pagination is None:
pagination = Pagination()
pagination.add_item(host + start_path)
return pagination
def get_source(self, page_url=None, driver=None):
assert (page_url is not None)
results_response = None
try:
results_response = requests.request('GET', page_url)
results_response.raise_for_status()
except requests.exceptions.HTTPError as error:
pass
return results_response
def get_docs(self, existing_docs=[], overwrite=False, to_print=True):
added_docs = []
# call all the get_docs_X() functions
added_docs += self.get_docs_Decisions(existing_docs=[], overwrite=False, to_print=True)
added_docs += self.get_docs_Recommendations(existing_docs=[], overwrite=False, to_print=True)
added_docs += self.get_docs_Opinions(existing_docs=[], overwrite=False, to_print=True)
added_docs += self.get_docs_AnnualReports(existing_docs=[], overwrite=False, to_print=True)
added_docs += self.get_docs_Guidelines(existing_docs=[], overwrite=False, to_print=True)
return added_docs
# TODO: Run this overnight
def get_docs_Decisions(self, existing_docs=[], overwrite=False, to_print=True):
print('------------ GETTING DECISIONS ------------')
added_docs = []
pagination = self.update_pagination(start_path='/el/enimerwtiko/prakseisArxis?field_year_from=2018&field_year_to=&field_category=239&field_thematic=All&field_protocol_number=&field_keywords=&page=0')
# This list stores the pages we have visited
visited_pages = []
while pagination.has_next():
page_url = pagination.get_next()
if to_print:
print('Page:\t', page_url)
page_source = self.get_source(page_url=page_url)
if page_source is None:
continue
results_soup = BeautifulSoup(page_source.text, 'html.parser')
assert results_soup
tbody = results_soup.find('tbody')
assert tbody
iterator = 1
for tr in tbody.find_all('tr'):
assert tr
print("\n------------ Document " + str(iterator) + " ------------")
iterator += 1
date_tag = tr.find('time', datetime='00Z')
assert date_tag
date_str = date_tag.get_text()
print('\tDocument Date: ' + date_str)
document_year = date_str[-4:]
if int(document_year) < 2018:
print("\tSkipping outdated document")
continue
a_tag = tr.find('a')
document_title = a_tag.get_text()
assert document_title
print('\tDocument Title: ' + document_title)
# Create the document has using the document title
document_hash = hashlib.md5(document_title.encode()).hexdigest()
if document_hash in existing_docs and overwrite is False:
if to_print:
print('\tSkipping existing document:\t', document_hash)
continue
document_href = a_tag.get('href')
assert document_href
if document_href.startswith('http'):
document_url = document_href
else:
document_url = 'https://www.dpa.gr' + document_href
if to_print:
print("\tDocument:\t", document_hash)
# Get document response object
document_response = None
try:
document_response = requests.request('GET', document_url, timeout=3)
document_response.raise_for_status()
except requests.exceptions.HTTPError as error:
if to_print:
print(error)
pass
if document_response is None:
continue
document_soup = BeautifulSoup(document_response.text, 'html.parser')
assert document_soup
span_tag = document_soup.find('span', class_='file-link')
assert span_tag
span_a_tag = span_tag.find('a')
assert span_a_tag
pdf_href = span_a_tag.get('href')
assert pdf_href
if pdf_href.startswith('http'):
pdf_url = pdf_href
else:
pdf_url = 'https://www.dpa.gr' + pdf_href
pdf_response = None
try:
pdf_response = requests.request('GET', pdf_url, timeout=3)
pdf_response.raise_for_status()
except requests.exceptions.HTTPError as error:
if to_print:
print(error)
pass
if pdf_response is None:
continue
dpa_folder = self.path
# document_folder = dpa_folder + '/' + 'Decisions' + '/' + document_hash
document_folder = dpa_folder + '/greece' + '/' + 'Decisions' + '/' + document_hash
try:
os.makedirs(document_folder)
except FileExistsError:
pass
# TODO: Ask about self.language_code -> its 'en' right now...
with open(document_folder + '/' + self.language_code + '.pdf', 'wb') as f:
f.write(pdf_response.content)
with open(document_folder + '/' + self.language_code + '.txt', 'wb') as f:
link_text = textract.process(document_folder + '/' + self.language_code + '.pdf')
f.write(link_text)
with open(document_folder + '/' + 'metadata.json', 'w') as f:
metadata = {
'title': {
self.language_code: document_title
},
'md5': document_hash,
'releaseDate': date_str,
'url': document_url
}
json.dump(metadata, f, indent=4, sort_keys=True)
added_docs.append(document_hash)
# Add the next page to the pagination object
ul = results_soup.find('ul', class_='pagination js-pager__items')
assert ul
# The last two li tags contain links for 'next' and 'end' buttons
li_list = ul.find_all('li')
assert li_list
# The second to last element should contain the 'next page' link
a = li_list[-2].find('a')
assert a
a_href = a.get('href')
assert a_href
page_link = 'https://www.dpa.gr/el/enimerwtiko/prakseisArxis' + a_href
# If the index where next page usually is leads to a page that scraper has already visited -> don't add
# to pagination object
if page_link in visited_pages:
print("There seems to be no more pages to look at")
continue
else:
print("\nNext Page: " + page_link)
visited_pages.append(page_link)
pagination.add_item(page_link)
return added_docs
def get_docs_Recommendations(self, existing_docs=[], overwrite=False, to_print=True):
print('------------ GETTING RECOMMENDATIONS ------------')
added_docs = []
pagination = self.update_pagination(start_path='/el/enimerwtiko/prakseisArxis?field_year_from=&field_year_to=&field_category=246&field_thematic=All&field_protocol_number=&field_keywords=')
# This list stores the pages we have visited
visited_pages = []
while pagination.has_next():
page_url = pagination.get_next()
if to_print:
print('Page:\t', page_url)
page_source = self.get_source(page_url=page_url)
if page_source is None:
continue
results_soup = BeautifulSoup(page_source.text, 'html.parser')
assert results_soup
tbody = results_soup.find('tbody')
assert tbody
iterator = 1
for tr in tbody.find_all('tr'):
assert tr
print("\n------------ Document " + str(iterator) + " ------------")
iterator += 1
date_tag = tr.find('time', datetime='00Z')
assert date_tag
date_str = date_tag.get_text()
print('\tDocument Date: ' + date_str)
document_year = date_str[-4:]
if int(document_year) < 2018:
print("\tSkipping outdated document")
continue
a_tag = tr.find('a')
document_title = a_tag.get_text()
assert document_title
print('\tDocument Title: ' + document_title)
# Create the document has using the document title
document_hash = hashlib.md5(document_title.encode()).hexdigest()
if document_hash in existing_docs and overwrite is False:
if to_print:
print('\tSkipping existing document:\t', document_hash)
continue
document_href = a_tag.get('href')
assert document_href
if document_href.startswith('http'):
document_url = document_href
else:
document_url = 'https://www.dpa.gr' + document_href
if to_print:
print("\tDocument:\t", document_hash)
# Get document response object
document_response = None
try:
document_response = requests.request('GET', document_url)
document_response.raise_for_status()
except requests.exceptions.HTTPError as error:
if to_print:
print(error)
pass
if document_response is None:
continue
document_soup = BeautifulSoup(document_response.text, 'html.parser')
assert document_soup
span_tag = document_soup.find('span', class_='file-link')
assert span_tag
span_a_tag = span_tag.find('a')
assert span_a_tag
pdf_href = span_a_tag.get('href')
assert pdf_href
if pdf_href.startswith('http'):
pdf_url = pdf_href
else:
pdf_url = 'https://www.dpa.gr' + pdf_href
pdf_response = None
try:
pdf_response = requests.request('GET', pdf_url)
pdf_response.raise_for_status()
except requests.exceptions.HTTPError as error:
if to_print:
print(error)
pass
if pdf_response is None:
continue
dpa_folder = self.path
# document_folder = dpa_folder + '/' + 'Recommendations' + '/' + document_hash
document_folder = dpa_folder + '/greece' + '/' + 'Recommendations' + '/' + document_hash
try:
os.makedirs(document_folder)
except FileExistsError:
pass
# TODO: Ask about self.language_code -> its 'en' right now...
with open(document_folder + '/' + self.language_code + '.pdf', 'wb') as f:
f.write(pdf_response.content)
with open(document_folder + '/' + self.language_code + '.txt', 'wb') as f:
link_text = textract.process(document_folder + '/' + self.language_code + '.pdf')
f.write(link_text)
with open(document_folder + '/' + 'metadata.json', 'w') as f:
metadata = {
'title': {
self.language_code: document_title
},
'md5': document_hash,
'releaseDate': date_str,
'url': document_url
}
json.dump(metadata, f, indent=4, sort_keys=True)
added_docs.append(document_hash)
return added_docs
# TODO: Address slow download speeds for documents
def get_docs_Opinions(self, existing_docs=[], overwrite=False, to_print=True):
print('------------ GETTING OPINIONS ------------')
added_docs = []
pagination = self.update_pagination(start_path='/el/enimerwtiko/prakseisArxis?field_year_from=&field_year_to=&field_category=238&field_thematic=All&field_protocol_number=&field_keywords=')
# This list stores the pages we have visited
visited_pages = []
while pagination.has_next():
page_url = pagination.get_next()
if to_print:
print('Page:\t', page_url)
page_source = self.get_source(page_url=page_url)
if page_source is None:
continue
results_soup = BeautifulSoup(page_source.text, 'html.parser')
assert results_soup
tbody = results_soup.find('tbody')
assert tbody
iterator = 1
for tr in tbody.find_all('tr'):
assert tr
print("\n------------ Document " + str(iterator) + " ------------")
iterator += 1
date_tag = tr.find('time', datetime='00Z')
assert date_tag
date_str = date_tag.get_text()
print('\tDocument Date: ' + date_str)
document_year = date_str[-4:]
if int(document_year) < 2018:
print("\tSkipping outdated document")
continue
a_tag = tr.find('a')
document_title = a_tag.get_text()
assert document_title
print('\tDocument Title: ' + document_title)
# Create the document has using the document title
document_hash = hashlib.md5(document_title.encode()).hexdigest()
if document_hash in existing_docs and overwrite is False:
if to_print:
print('\tSkipping existing document:\t', document_hash)
continue
document_href = a_tag.get('href')
assert document_href
if document_href.startswith('http'):
document_url = document_href
else:
document_url = 'https://www.dpa.gr' + document_href
if to_print:
print("\tDocument:\t", document_hash)
# Get document response object
document_response = None
try:
document_response = requests.request('GET', document_url, timeout=(2, 2))
document_response.raise_for_status()
# Added more errors to catch
except requests.exceptions.ReadTimeout as error:
if to_print:
print(error)
pass
except requests.exceptions.ConnectionError as error:
if to_print:
print(error)
pass
except urllib3.exceptions.ReadTimeoutError as error:
if to_print:
print(error)
pass
except socket.timeout as error:
if to_print:
print(error)
pass
if document_response is None:
continue
document_soup = BeautifulSoup(document_response.text, 'html.parser')
assert document_soup
span_tag = document_soup.find('span', class_='file-link')
assert span_tag
span_a_tag = span_tag.find('a')
assert span_a_tag
pdf_href = span_a_tag.get('href')
assert pdf_href
if pdf_href.startswith('http'):
pdf_url = pdf_href
else:
pdf_url = 'https://www.dpa.gr' + pdf_href
pdf_response = None
try:
pdf_response = requests.request('GET', pdf_url, timeout=(2, 2))
pdf_response.raise_for_status()
except requests.exceptions.HTTPError as error:
if to_print:
print(error)
pass
except requests.exceptions.ConnectionError as error:
if to_print:
print(error)
pass
except urllib3.exceptions.ReadTimeoutError as error:
if to_print:
print(error)
pass
except socket.timeout as error:
if to_print:
print(error)
pass
if pdf_response is None:
continue
dpa_folder = self.path
# document_folder = dpa_folder + '/' + 'Opinions' + '/' + document_hash
document_folder = dpa_folder + '/greece' + '/' + 'Opinions' + '/' + document_hash
try:
os.makedirs(document_folder)
except FileExistsError:
pass
# TODO: Ask about self.language_code -> its 'en' right now...
with open(document_folder + '/' + self.language_code + '.pdf', 'wb') as f:
f.write(pdf_response.content)
# If a pdf fails to convert to text (because pdf is broken), print error
try:
with open(document_folder + '/' + self.language_code + '.txt', 'wb') as f:
link_text = textract.process(document_folder + '/' + self.language_code + '.pdf')
f.write(link_text)
except:
print("Failed to convert pdf to text document.")
pass
with open(document_folder + '/' + 'metadata.json', 'w') as f:
metadata = {
'title': {
self.language_code: document_title
},
'md5': document_hash,
'releaseDate': date_str,
'url': document_url
}
json.dump(metadata, f, indent=4, sort_keys=True)
added_docs.append(document_hash)
# Add the next page to the pagination object
ul = results_soup.find('ul', class_='pagination js-pager__items')
assert ul
# The last two li tags contain links for 'next' and 'end' buttons
li_list = ul.find_all('li')
assert li_list
# The second to last element should contain the 'next page' link
a = li_list[-2].find('a')
assert a
a_href = a.get('href')
assert a_href
page_link = 'https://www.dpa.gr/el/enimerwtiko/prakseisArxis' + a_href
# If the index where next page usually is leads to a page that scraper has already visited -> don't add
# to pagination object
if page_link in visited_pages:
print("There seems to be no more pages to look at")
continue
else:
print("\nNext Page: " + page_link)
visited_pages.append(page_link)
pagination.add_item(page_link)
return added_docs
def get_docs_Guidelines(self, existing_docs=[], overwrite=False, to_print=True):
print('------------ GETTING GUIDELINES ------------')
added_docs = []
pagination = self.update_pagination(start_path='/el/enimerwtiko/prakseisArxis?field_year_from=&field_year_to=&field_category=245&field_thematic=All&field_protocol_number=&field_keywords=')
# This list stores the pages we have visited
visited_pages = []
while pagination.has_next():
page_url = pagination.get_next()
if to_print:
print('Page:\t', page_url)
page_source = self.get_source(page_url=page_url)
if page_source is None:
continue
results_soup = BeautifulSoup(page_source.text, 'html.parser')
assert results_soup
tbody = results_soup.find('tbody')
assert tbody
iterator = 1
for tr in tbody.find_all('tr'):
assert tr
print("\n------------ Document " + str(iterator) + " ------------")
iterator += 1
date_tag = tr.find('time', datetime='00Z')
assert date_tag
date_str = date_tag.get_text()
print('\tDocument Date: ' + date_str)
document_year = date_str[-4:]
if int(document_year) < 2018:
print("\tSkipping outdated document")
continue
a_tag = tr.find('a')
document_title = a_tag.get_text()
assert document_title
print('\tDocument Title: ' + document_title)
# Create the document has using the document title
document_hash = hashlib.md5(document_title.encode()).hexdigest()
if document_hash in existing_docs and overwrite is False:
if to_print:
print('\tSkipping existing document:\t', document_hash)
continue
document_href = a_tag.get('href')
assert document_href
if document_href.startswith('http'):
document_url = document_href
else:
document_url = 'https://www.dpa.gr' + document_href
if to_print:
print("\tDocument:\t", document_hash)
# Get document response object
document_response = None
try:
document_response = requests.request('GET', document_url)
document_response.raise_for_status()
except requests.exceptions.HTTPError as error:
if to_print:
print(error)
pass
if document_response is None:
continue
document_soup = BeautifulSoup(document_response.text, 'html.parser')
assert document_soup
span_tag = document_soup.find('span', class_='file-link')
assert span_tag
span_a_tag = span_tag.find('a')
assert span_a_tag
pdf_href = span_a_tag.get('href')
assert pdf_href
if pdf_href.startswith('http'):
pdf_url = pdf_href
else:
pdf_url = 'https://www.dpa.gr' + pdf_href
pdf_response = None
try:
pdf_response = requests.request('GET', pdf_url)
pdf_response.raise_for_status()
except requests.exceptions.HTTPError as error:
if to_print:
print(error)
pass
if pdf_response is None:
continue
dpa_folder = self.path
#document_folder = dpa_folder + '/' + 'Guidelines' + '/' + document_hash
document_folder = dpa_folder + '/greece' + '/' + 'Guidelines' + '/' + document_hash
try:
os.makedirs(document_folder)
except FileExistsError:
pass
# TODO: Ask about self.language_code -> its 'en' right now...
with open(document_folder + '/' + self.language_code + '.pdf', 'wb') as f:
f.write(pdf_response.content)
with open(document_folder + '/' + self.language_code + '.txt', 'wb') as f:
link_text = textract.process(document_folder + '/' + self.language_code + '.pdf')
f.write(link_text)
with open(document_folder + '/' + 'metadata.json', 'w') as f:
metadata = {
'title': {
self.language_code: document_title
},
'md5': document_hash,
'releaseDate': date_str,
'url': document_url
}
json.dump(metadata, f, indent=4, sort_keys=True)
added_docs.append(document_hash)
return added_docs
# Only visits first page because further pages contain only outdated documents
def get_docs_AnnualReports(self, existing_docs=[], overwrite=False, to_print=True):
print('------------ GETTING ANNUAL REPORTS ------------')
added_docs = []
pagination = self.update_pagination(start_path='/enimerwtiko/etisies-ektheseis')
# This list stores the pages we have visited
visited_pages = []
while pagination.has_next():
page_url = pagination.get_next()
if to_print:
print('Page:\t', page_url)
page_source = self.get_source(page_url=page_url)
if page_source is None:
continue
results_soup = BeautifulSoup(page_source.text, 'html.parser')
assert results_soup
region = results_soup.find('div', class_='region region-content')
assert region
view_content = region.find('div', class_='view-content')
assert view_content
clearfix = view_content.find('div', class_='views-col')
assert clearfix
iterator = 1
# Only look at outer 'div' tags
for div in clearfix.find_all('div', class_='events-teaser'):
assert div
print("\n------------ Document " + str(iterator) + " ------------")
iterator += 1
a_tag = div.find('a')
assert a_tag
document_year = a_tag.get_text()[-4:]
print('\tDocument Date: ' + document_year)
if int(document_year) < 2018:
print("\tSkipping outdated document")
continue
document_title = a_tag.get_text()
assert document_title
print('\tDocument Title: ' + document_title)
# Create the document has using the document title
document_hash = hashlib.md5(document_title.encode()).hexdigest()
if document_hash in existing_docs and overwrite is False:
if to_print:
print('\tSkipping existing document:\t', document_hash)
continue
document_href = a_tag.get('href')
assert document_href
if document_href.startswith('http'):
document_url = document_href
else:
document_url = 'https://www.dpa.gr' + document_href
if to_print:
print("\tDocument:\t", document_hash)
# Get document response object
document_response = None
try:
document_response = requests.request('GET', document_url, timeout=5)
document_response.raise_for_status()
except requests.exceptions.HTTPError as error:
if to_print:
print(error)
pass
if document_response is None:
continue
document_soup = BeautifulSoup(document_response.text, 'html.parser')
assert document_soup
article = document_soup.find('article', role='article')
assert article
content = article.find('div', class_='content')
assert content
pdf_a = content.find('a')
assert pdf_a
pdf_href = pdf_a.get('href')
assert pdf_href
if pdf_href.startswith('http'):
pdf_url = pdf_href
else:
pdf_url = 'https://www.dpa.gr' + pdf_href
print("\tPDF URL: " + pdf_url)
pdf_response = None
try:
pdf_response = requests.request('GET', pdf_url, timeout=5)
pdf_response.raise_for_status()
except requests.exceptions.HTTPError as error:
if to_print:
print(error)
pass
if pdf_response is None:
continue
dpa_folder = self.path
#document_folder = dpa_folder + '/' + 'Annual Reports' + '/' + document_hash
document_folder = dpa_folder + '/greece' + '/' + 'Annual Reports' + '/' + document_hash
try:
os.makedirs(document_folder)
except FileExistsError:
pass
# TODO: Ask about self.language_code -> its 'en' right now...
with open(document_folder + '/' + self.language_code + '.pdf', 'wb') as f:
f.write(pdf_response.content)
try:
with open(document_folder + '/' + self.language_code + '.txt', 'wb') as f:
link_text = textract.process(document_folder + '/' + self.language_code + '.pdf')
f.write(link_text)
except:
print("Failed to convert pdf to text document")
pass
with open(document_folder + '/' + 'metadata.json', 'w') as f:
metadata = {
'title': {
self.language_code: document_title
},
'md5': document_hash,
'releaseDate': document_year,
'url': document_url
}
json.dump(metadata, f, indent=4, sort_keys=True)
added_docs.append(document_hash)
return added_docs
| 40.886792
| 207
| 0.515367
| 3,254
| 32,505
| 4.914259
| 0.082053
| 0.018385
| 0.017447
| 0.02714
| 0.862298
| 0.859171
| 0.859171
| 0.841911
| 0.838659
| 0.835783
| 0
| 0.00529
| 0.395139
| 32,505
| 794
| 208
| 40.938287
| 0.808046
| 0.07405
| 0
| 0.831731
| 0
| 0.00641
| 0.098396
| 0.019732
| 0
| 0
| 0
| 0.001259
| 0.099359
| 1
| 0.014423
| false
| 0.038462
| 0.032051
| 0
| 0.060897
| 0.168269
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
007c4b017d2362d8a49ea6dbb5cb9f2dd89ab8d5
| 14,007
|
py
|
Python
|
querybuilder/tests/where_tests.py
|
jaredlewis/django-query-builder
|
240616783f8d912b5d3c94f083c7247763124550
|
[
"MIT"
] | null | null | null |
querybuilder/tests/where_tests.py
|
jaredlewis/django-query-builder
|
240616783f8d912b5d3c94f083c7247763124550
|
[
"MIT"
] | null | null | null |
querybuilder/tests/where_tests.py
|
jaredlewis/django-query-builder
|
240616783f8d912b5d3c94f083c7247763124550
|
[
"MIT"
] | null | null | null |
from django.db.models import Q
from django.db.models.sql import OR
from querybuilder.query import Query
from querybuilder.tests.models import Account
from querybuilder.tests.query_tests import QueryTestCase, get_comparison_str
class WhereTest(QueryTestCase):
    """Tests for Query.where(): rendering Q objects into SQL WHERE clauses.

    Covers equality and comparison lookups (eq, gt, gte, lt, lte), negation
    via ~Q, IN lookups, LIKE/ILIKE lookups (contains/icontains/startswith),
    and AND/OR composition of conditions.  Expected SQL uses named
    parameters %(A0)s, %(A1)s, ... numbered in construction order, which is
    why each test pins the exact rendered string.
    """
    def test_where_eq(self):
        """Q(field=value) renders as an equality condition."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            one='two'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (one = %(A0)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_named_arg(self):
        """where(field=value) without an explicit Q behaves like Q(field=value)."""
        query = Query().from_table(
            table='test_table'
        ).where(
            one='two'
        )
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (one = %(A0)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_many_named_arg(self):
        """Multiple kwargs AND together; kwarg order is unspecified, so both orderings are accepted."""
        query = Query().from_table(
            table='test_table'
        ).where(
            one='two',
            three='four'
        )
        query_str = query.get_sql()
        expected_queries = [
            'SELECT test_table.* FROM test_table WHERE (three = %(A0)s AND one = %(A1)s)',
            'SELECT test_table.* FROM test_table WHERE (one = %(A0)s AND three = %(A1)s)'
        ]
        self.assertIn(query_str, expected_queries)
    def test_where_not_eq(self):
        """~Q(field=value) renders as NOT(equality)."""
        query = Query().from_table(
            table='test_table'
        ).where(~Q(
            one='two'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((NOT(one = %(A0)s)))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_eq_explicit(self):
        """The explicit __eq lookup renders identically to the implicit form."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            one__eq='two'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (one = %(A0)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_not_eq_explicit(self):
        """Negated explicit __eq lookup renders as NOT(equality)."""
        query = Query().from_table(
            table='test_table'
        ).where(~Q(
            one__eq='two'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((NOT(one = %(A0)s)))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_gt(self):
        """__gt lookup renders as `>`."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            field_name__gt=10
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (field_name > %(A0)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_not_gt(self):
        """Negated __gt lookup renders as NOT(>)."""
        query = Query().from_table(
            table='test_table'
        ).where(~Q(
            field_name__gt=10
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((NOT(field_name > %(A0)s)))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_gte(self):
        """__gte lookup renders as `>=`."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            field_name__gte=10
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (field_name >= %(A0)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_not_gte(self):
        """Negated __gte lookup renders as NOT(>=)."""
        query = Query().from_table(
            table='test_table'
        ).where(~Q(
            field_name__gte=10
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((NOT(field_name >= %(A0)s)))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_lt(self):
        """__lt lookup renders as `<`."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            field_name__lt=10
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (field_name < %(A0)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_not_lt(self):
        """Negated __lt lookup renders as NOT(<)."""
        query = Query().from_table(
            table='test_table'
        ).where(~Q(
            field_name__lt=10
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((NOT(field_name < %(A0)s)))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_lte(self):
        """__lte lookup renders as `<=`."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            field_name__lte=10
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (field_name <= %(A0)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_not_lte(self):
        """Negated __lte lookup renders as NOT(<=)."""
        query = Query().from_table(
            table='test_table'
        ).where(~Q(
            field_name__lte=10
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((NOT(field_name <= %(A0)s)))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_in_single(self):
        """__in with a scalar renders as a one-element IN list."""
        query = Query().from_table(
            table=Account
        ).where(Q(
            id__in=10
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT tests_account.* FROM tests_account WHERE (id IN (%(A0)s))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_in_csv(self):
        """__in with a comma-separated string is split into one parameter per value."""
        query = Query().from_table(
            table=Account
        ).where(Q(
            id__in='10,11,12'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT tests_account.* FROM tests_account WHERE (id IN (%(A0)s,%(A1)s,%(A2)s))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_in_list(self):
        """__in with a list renders one parameter per element."""
        query = Query().from_table(
            table=Account
        ).where(Q(
            id__in=[10, 11, 12]
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT tests_account.* FROM tests_account WHERE (id IN (%(A0)s,%(A1)s,%(A2)s))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_contains(self):
        """__contains renders as LIKE with the value wrapped in %...%."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            field_name__contains='some value'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (field_name LIKE %(A0)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
        self.assertEqual(query._where.args['A0'], '%some value%', 'Value is not correct')
    def test_where_icontains(self):
        """__icontains renders as case-insensitive ILIKE with %...% wrapping."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            field_name__icontains='some value'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (field_name ILIKE %(A0)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
        self.assertEqual(query._where.args['A0'], '%some value%', 'Value is not correct')
    def test_where_not_contains(self):
        """Negated __contains renders as NOT(LIKE) with %...% wrapping."""
        query = Query().from_table(
            table='test_table'
        ).where(~Q(
            field_name__contains='some value'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((NOT(field_name LIKE %(A0)s)))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
        self.assertEqual(query._where.args['A0'], '%some value%', 'Value is not correct')
    def test_where_not_icontains(self):
        """Negated __icontains renders as NOT(ILIKE) with %...% wrapping."""
        query = Query().from_table(
            table='test_table'
        ).where(~Q(
            field_name__icontains='some value'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((NOT(field_name ILIKE %(A0)s)))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
        self.assertEqual(query._where.args['A0'], '%some value%', 'Value is not correct')
    def test_where_startswith(self):
        """__startswith renders as LIKE with a trailing % only."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            field_name__startswith='some value'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (field_name LIKE %(A0)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
        self.assertEqual(query._where.args['A0'], 'some value%', 'Value is not correct')
    def test_where_not_startswith(self):
        """Negated __startswith renders as NOT(LIKE) with a trailing %."""
        query = Query().from_table(
            table='test_table'
        ).where(~Q(
            field_name__startswith='some value'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((NOT(field_name LIKE %(A0)s)))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
        self.assertEqual(query._where.args['A0'], 'some value%', 'Value is not correct')
    def test_where_and(self):
        """Chained where() calls AND their conditions together."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            one='two'
        )).where(Q(
            three='four'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (one = %(A0)s AND three = %(A1)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_and_combined(self):
        """Q & Q inside a single where() is equivalent to chaining."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            one='two'
        ) & Q(
            three='four'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE (one = %(A0)s AND three = %(A1)s)'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_or(self):
        """A second where() with the OR connector ORs the new condition."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            one='two'
        )).where(Q(
            three='four'
        ), OR)
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((one = %(A0)s) OR (three = %(A1)s))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_combined_or(self):
        """Q | Q inside a single where() renders as a grouped OR."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            one='two'
        ) | Q(
            three='four'
        ))
        query_str = query.get_sql()
        expected_query = 'SELECT test_table.* FROM test_table WHERE ((one = %(A0)s OR three = %(A1)s))'
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_and_with_combined_or(self):
        """An ANDed where() followed by an ORed Q pair nests correctly."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            one='two'
        )).where(Q(
            three='four'
        ) | Q(
            five='six'
        ))
        query_str = query.get_sql()
        expected_query = (
            'SELECT test_table.* '
            'FROM test_table '
            'WHERE (one = %(A0)s AND (three = %(A1)s OR five = %(A2)s))'
        )
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_and_with_not_combined_or(self):
        """Negation inside an ORed Q pair keeps the NOT local to its operand."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            one='two'
        )).where(~Q(
            three='four'
        ) | Q(
            five='six'
        ))
        query_str = query.get_sql()
        expected_query = (
            'SELECT test_table.* '
            'FROM test_table '
            'WHERE (one = %(A0)s AND ((NOT(three = %(A1)s)) OR five = %(A2)s))'
        )
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
    def test_where_complex(self):
        """Kitchen-sink combination of lookups, negation, OR connectors and Q algebra."""
        query = Query().from_table(
            table='test_table'
        ).where(Q(
            one=1
        )).where(Q(
            two__gt=2
        )).where(~Q(
            three__gte=3
        )).where(~Q(
            four__lt=4
        ), OR).where(Q(
            five__lte=5
        ), OR).where(Q(
            six__contains='six'
        )).where(~Q(
            seven__startswith='seven'
        )).where(Q(
            eight=8
        ) & Q(
            nine=9
        ) | Q(
            ten=10
        ) | ~Q(
            eleven=11
        ))
        query_str = query.get_sql()
        expected_query = ''.join([
            'SELECT test_table.* FROM test_table WHERE ',
            '(((one = %(A0)s AND two > %(A1)s AND (NOT(three >= %(A2)s))) OR (NOT(four < %(A3)s)) ',
            'OR five <= %(A4)s) AND (six LIKE %(A5)s) AND (NOT(seven LIKE %(A6)s)) AND ',
            '((eight = %(A7)s AND nine = %(A8)s) OR ten = %(A9)s OR (NOT(eleven = %(A10)s))))'
        ])
        self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
| 35.193467
| 105
| 0.595274
| 1,760
| 14,007
| 4.420455
| 0.052841
| 0.091517
| 0.121337
| 0.156555
| 0.924679
| 0.922751
| 0.922751
| 0.914396
| 0.909769
| 0.909769
| 0
| 0.010238
| 0.281716
| 14,007
| 397
| 106
| 35.282116
| 0.763045
| 0
| 0
| 0.752239
| 0
| 0.038806
| 0.215392
| 0.003284
| 0
| 0
| 0
| 0
| 0.107463
| 1
| 0.089552
| false
| 0
| 0.014925
| 0
| 0.107463
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
00ca84ce146586305cc671e754c4f7c0a8b57eef
| 9,602
|
py
|
Python
|
pytest/testJasvet.py
|
RomanValov/ArmoryDB
|
625eff9712161676ad83deb03616e6edb48283ca
|
[
"MIT"
] | 505
|
2016-02-04T15:54:46.000Z
|
2022-03-27T18:43:01.000Z
|
pytest/testJasvet.py
|
Lcote71/BitcoinArmory
|
1d02a6640d6257ab0c37013e5cd4b99681a5cfc3
|
[
"MIT"
] | 528
|
2016-02-06T19:50:12.000Z
|
2022-01-15T10:21:16.000Z
|
pytest/testJasvet.py
|
Lcote71/BitcoinArmory
|
1d02a6640d6257ab0c37013e5cd4b99681a5cfc3
|
[
"MIT"
] | 208
|
2015-01-02T10:31:40.000Z
|
2021-12-14T07:37:36.000Z
|
import sys
from unittest.case import SkipTest
sys.path.append('..')
from armoryengine.ALL import *
from jasvet import *
import unittest
class JasvetTester(unittest.TestCase):
    """Tests for the jasvet Bitcoin message-signing helpers.

    Exercises base58 and hash160 address conversions, DER private-key
    serialization, message formatting, and sign/verify round trips.  Most
    fixed expected values derive from the well-known private key 1 (its
    public key, hash160 and addresses are standard reference vectors).
    """
    def testRandomK(self):
        # randomk() must produce a truthy (non-empty / non-zero) nonce.
        r = randomk()
        self.assertTrue(r)
    def testHash160ToBC(self):
        # most of these values are from the private key 1
        h160 = b'751e76e8199196d454941c45d1b3a323f1433bd6'
        addr = b'1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH'
        # Round-trip: address -> hash160 -> address.
        h160b = binary_to_hex(bc_address_to_hash_160(addr))
        self.assertEqual(h160, h160b)
        addrb = hash_160_to_bc_address(hex_to_binary(h160))
        self.assertEqual(addr, addrb)
        # Compressed public key (02 prefix) maps to the same address.
        pubkey = b'0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
        addrb = public_key_to_bc_address(hex_to_binary(pubkey))
        self.assertEqual(addr, addrb)
        # Same checks for the uncompressed-key address of private key 1.
        h160 = b'91b24bf9f5288532960ac687abb035127b1d28a5'
        addr = b'1EHNa6Q4Jz2uvNExL497mE43ikXhwF6kZm'
        h160b = binary_to_hex(bc_address_to_hash_160(addr))
        self.assertEqual(h160, h160b)
        addrb = hash_160_to_bc_address(hex_to_binary(h160))
        self.assertEqual(addr, addrb)
        # Uncompressed public key (04 prefix, x || y coordinates).
        pubkey = b'0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8'
        addrb = public_key_to_bc_address(hex_to_binary(pubkey))
        self.assertEqual(addr, addrb)
    def testB58(self):
        # base58 encode/decode round trip on a 4-byte value.
        b = hex_to_binary(b'00010203')
        b58 = b'1Ldp'
        b58b = b58encode(b)
        self.assertEqual(b58,b58b)
        bb = b58decode(b58, 4)
        self.assertEqual(b,bb)
    def testI2d(self):
        # DER serialization of private key 1, uncompressed then compressed.
        k = EC_KEY(1)
        r = binary_to_hex(i2d_ECPrivateKey(k))
        expected = b'3082011302010104200000000000000000000000000000000000000000000000000000000000000001a081a53081a2020101302c06072a8648ce3d0101022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f300604010004010704410479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141020101a1440342000479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8'
        self.assertEqual(r,expected)
        r = binary_to_hex(i2d_ECPrivateKey(k, True))
        expected = b'3081d302010104200000000000000000000000000000000000000000000000000000000000000001a08185308182020101302c06072a8648ce3d0101022100fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f300604010004010704210279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798022100fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141020101a1240322000279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
        self.assertEqual(r,expected)
    def testDec(self):
        # decbin/decvi serialize integers to binary representations.
        x = b'7483729483792178'
        x2 = binary_to_hex(decbin(0x7483729483792178))
        self.assertEqual(x,x2)
        # NOTE(review): the expected value below does not obviously match the
        # decvi() input (same constant as the decbin case) — verify this
        # assertion actually holds.
        x = b'ff7821798394728374'
        x2 = binary_to_hex(decvi(0x7483729483792178))
        self.assertEqual(x,x2)
    def testFormat(self):
        # Expected bytes: "\x18Bitcoin Signed Message:\n\x05hello" hex-encoded.
        x = b'18426974636f696e205369676e6564204d6573736167653a0a0568656c6c6f'
        x2 = binary_to_hex(format_msg_to_sign(b'hello'))
        self.assertEqual(x,x2)
    def testSer(self):
        # Public-key serialization of private key 1 and its address.
        k = EC_KEY(1)
        x = b'0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8'
        x2 = binary_to_hex(k.pubkey.ser())
        self.assertEqual(x,x2)
        addr = public_key_to_bc_address(k.pubkey.ser())
        addr2 = b'1EHNa6Q4Jz2uvNExL497mE43ikXhwF6kZm'
        self.assertEqual(addr,addr2)
    def testVerify(self):
        # Fixed signature block (hex-encoded "-----BEGIN BITCOIN SIGNED
        # MESSAGE-----" armor); readSigBlock must recover the signature and
        # message, and the signature must verify to the expected address.
        sig = b'G/8M14BRD6GU96y6o1x+9xSfoWBdzZp8p1e/vAZ857D4l9+ozM08CTnzqsxkv1GANssNh1MEmtqgrgEfSPRX5gU='
        msg = hex_to_binary(b'6368616e67656c6f672020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f6368616e67656c6f672e747874202020202020202020202020323136363963313762363230353033633035353830303533353935646265646461316139633231343662336664613839313232653234343434613435646336620d0a626f6f7473747261702020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f626f6f7473747261702e6461742e746f7272656e7420202020623632633038393332363638636531363264353132323631333539343037323465393066346337313730346163393336663734636331353362333463633235310d0a646f776e6c6f6164732020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f646c6c696e6b732e7478742020202020202020202020202020366335306538633864386266393830306366353332643462323062663439646137633133343336313839663663316230326661386232386233383832396238330d0a6e6f746966792020202020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f6e6f746966792e747874202020202020202020202020202020656261343931333936636531643936363731373761366532393861653334383563316462333564313064383466383965633963643838326261633266616139610d0a')
        digest = hex_to_binary(b'2d2d2d2d2d424547494e20424954434f494e205349474e4544204d4553534147452d2d2d2d2d0d0a436f6d6d656e743a205369676e656420627920426974636f696e2041726d6f72792076302e39322e330d0a0d0a6368616e67656c6f672020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f6368616e67656c6f672e747874202020202020202020202020323136363963313762363230353033633035353830303533353935646265646461316139633231343662336664613839313232653234343434613435646336620d0a626f6f7473747261702020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f626f6f7473747261702e6461742e746f7272656e7420202020623632633038393332363638636531363264353132323631333539343037323465393066346337313730346163393336663734636331353362333463633235310d0a646f776e6c6f6164732020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f646c6c696e6b732e7478742020202020202020202020202020366335306538633864386266393830306366353332643462323062663439646137633133343336313839663663316230326661386232386233383832396238330d0a6e6f746966792020202020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f6e6f746966792e747874202020202020202020202020202020656261343931333936636531643936363731373761366532393861653334383563316462333564313064383466383965633963643838326261633266616139610d0a0d0a2d2d2d2d2d424547494e20424954434f494e205349474e41545552452d2d2d2d2d0d0a0d0a0d0a472f384d3134425244364755393679366f31782b397853666f5742647a5a70387031652f76415a38353744346c392b6f7a4d303843546e7a7173786b763147410d0a4e73734e68314d456d74716772674566535052583567553d0d0a3d416e6a4e0d0a2d2d2d2d2d454e4420424954434f494e205349474e41545552452d2d2d2d2d')
        formatted = hex_to_binary(b'2d2d2d2d2d424547494e20424954434f494e205349474e4544204d4553534147452d2d2d2d2d0d0a436f6d6d656e743a205369676e656420627920426974636f696e2041726d6f72792076302e39322e330d0a0d0a6368616e67656c6f672020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f6368616e67656c6f672e747874202020202020202020202020323136363963313762363230353033633035353830303533353935646265646461316139633231343662336664613839313232653234343434613435646336620d0a626f6f7473747261702020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f626f6f7473747261702e6461742e746f7272656e7420202020623632633038393332363638636531363264353132323631333539343037323465393066346337313730346163393336663734636331353362333463633235310d0a646f776e6c6f6164732020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f646c6c696e6b732e7478742020202020202020202020202020366335306538633864386266393830306366353332643462323062663439646137633133343336313839663663316230326661386232386233383832396238330d0a6e6f746966792020202020202068747470733a2f2f73332e616d617a6f6e6177732e636f6d2f626974636f696e61726d6f72792d6d656469612f6e6f746966792e747874202020202020202020202020202020656261343931333936636531643936363731373761366532393861653334383563316462333564313064383466383965633963643838326261633266616139610d0a0d0a2d2d2d2d2d424547494e20424954434f494e205349474e41545552452d2d2d2d2d0d0a0d0a0d0a472f384d3134425244364755393679366f31782b397853666f5742647a5a70387031652f76415a38353744346c392b6f7a4d303843546e7a7173786b763147410d0a4e73734e68314d456d74716772674566535052583567553d0d0a3d416e6a4e0d0a2d2d2d2d2d454e4420424954434f494e205349474e41545552452d2d2d2d2d')
        self.assertEqual(FormatText(digest, True), formatted)
        sigb, msgb = readSigBlock(digest)
        self.assertEqual(sig, sigb)
        self.assertEqual(msg, msgb)
        self.assertEqual(verify_message_Bitcoin(sig, msg), b'1NWvhByxfTXPYNT4zMBmEY3VL8QJQtQoei')
    def testSign(self):
        # Signature(r, s) serializes to two 32-byte big-endian integers.
        r,s = 1,1
        x = b'00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001'
        x2 = binary_to_hex(Signature(r,s).ser())
        self.assertEqual(x,x2)
        # Sign/verify round trip with a fixed 32-byte secret.
        secret = b'secretsecretsecretsecretsecretse'
        message = b'hello there'
        data2 = sign_message_Bitcoin(secret, message)
        sign, msg = data2['b64-signature'], data2['message']
        self.assertTrue(verify_message_Bitcoin(sign, msg))
    def testMisc(self):
        # ASv0 / ASv1B64 armored-signature helpers round trip and emit the
        # expected BEGIN/END BITCOIN MESSAGE armor lines.
        pvk1=b'\x01'*32
        text1=b'Hello world!\n'
        sv0=ASv0(pvk1, text1)
        self.assertTrue(verifySignature(sv0['b64-signature'], sv0['message'], signVer='v0'))
        d = ASv1B64(pvk1, text1)
        self.assertEqual(d[:31], b'-----BEGIN BITCOIN MESSAGE-----')
        self.assertEqual(d[-29:], b'-----END BITCOIN MESSAGE-----')
| 84.22807
| 1,740
| 0.877838
| 445
| 9,602
| 18.773034
| 0.298876
| 0.039502
| 0.011851
| 0.007781
| 0.486234
| 0.053866
| 0.053866
| 0.047163
| 0.047163
| 0.047163
| 0
| 0.602509
| 0.078525
| 9,602
| 113
| 1,741
| 84.973451
| 0.341659
| 0.004895
| 0
| 0.230769
| 0
| 0
| 0.693258
| 0.673995
| 0
| 1
| 0.003769
| 0
| 0.274725
| 1
| 0.10989
| false
| 0
| 0.054945
| 0
| 0.175824
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
00e32bb3a7fafaeb8ed8f24a335f3b75659990a1
| 161
|
py
|
Python
|
app/core/utils.py
|
dayvagrant/empty_api
|
66628b746b684d531e90ca5de1a8c6301688a0ab
|
[
"MIT"
] | null | null | null |
app/core/utils.py
|
dayvagrant/empty_api
|
66628b746b684d531e90ca5de1a8c6301688a0ab
|
[
"MIT"
] | null | null | null |
app/core/utils.py
|
dayvagrant/empty_api
|
66628b746b684d531e90ca5de1a8c6301688a0ab
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timezone
def return_current_time():
    """Example func: return the current UTC time as ``"YYYY/MM/DD, HH:MM"``.

    Fixes the original's malformed docstring (a stray fourth quote leaked
    into the text) and replaces the deprecated ``datetime.utcnow()`` with a
    timezone-aware ``datetime.now(timezone.utc)``; the formatted output is
    unchanged.
    """
    return datetime.now(timezone.utc).strftime("%Y/%m/%d, %H:%M")
| 23
| 56
| 0.677019
| 22
| 161
| 4.863636
| 0.681818
| 0.242991
| 0.317757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149068
| 161
| 6
| 57
| 26.833333
| 0.781022
| 0.21118
| 0
| 0
| 0
| 0
| 0.123967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
00e9f760715423adf781559d50a3be35c85e0f8c
| 768
|
py
|
Python
|
re_groups.py
|
vikashchy/regex-examples
|
b393c38c277006cb0e68cad76f00616c9d35d4d6
|
[
"Apache-2.0"
] | 1
|
2021-02-06T14:31:48.000Z
|
2021-02-06T14:31:48.000Z
|
re_groups.py
|
vikashchy/regex-examples
|
b393c38c277006cb0e68cad76f00616c9d35d4d6
|
[
"Apache-2.0"
] | null | null | null |
re_groups.py
|
vikashchy/regex-examples
|
b393c38c277006cb0e68cad76f00616c9d35d4d6
|
[
"Apache-2.0"
] | null | null | null |
import re

# Get the area code from the phone number.
# Parentheses in the pattern create capture groups: group(1) is the area
# code, group(2) the local part; group(0) is always the whole match.
text = "my phone number is: 963-221-1633"
phone_no_regex = re.compile(r'(\d\d\d)-(\d\d\d-\d\d\d\d)')
match_object = phone_no_regex.search(text)
for group_index in range(3):
    print(match_object.group(group_index))

# Same idea, but the area code's parentheses are literal characters
# (escaped in the pattern), so only the local part is captured.
text = "my phone number is: (963)-221-1633"
phone_no_regex = re.compile(r'\(\d\d\d\)-(\d\d\d-\d\d\d\d)')
match_object = phone_no_regex.search(text)
for group_index in range(2):
    print(match_object.group(group_index))
| 42.666667
| 101
| 0.72526
| 137
| 768
| 3.956204
| 0.277372
| 0.066421
| 0.088561
| 0.103321
| 0.833948
| 0.833948
| 0.833948
| 0.833948
| 0.833948
| 0.833948
| 0
| 0.037707
| 0.136719
| 768
| 17
| 102
| 45.176471
| 0.779789
| 0.316406
| 0
| 0.5
| 0
| 0.166667
| 0.254777
| 0.11465
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.416667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
00f55778620cb6b212d5682c14f1aca012956942
| 1,234
|
py
|
Python
|
scitbx/source_generators/array_family/generate_all.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
scitbx/source_generators/array_family/generate_all.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
scitbx/source_generators/array_family/generate_all.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import absolute_import, division, print_function
import os
import scitbx.source_generators.array_family.generate_reductions
import scitbx.source_generators.array_family.generate_std_imports
import scitbx.source_generators.array_family.generate_operator_functors
import scitbx.source_generators.array_family.generate_operator_traits_builtin
import scitbx.source_generators.array_family.generate_algebras
import scitbx.source_generators.array_family.generate_apply
def refresh(array_family):
  """Regenerate the scitbx array_family C++ header files.

  :param array_family: path to the array_family directory; must exist and
      contain a "detail" subdirectory.

  Runs each source-generator module's run() against the same target
  directory.  The generators are kept in one tuple so adding a generator
  is a one-line change instead of another copy-pasted call.
  """
  assert os.path.isdir(array_family)
  array_family_detail = os.path.join(array_family, "detail")
  assert os.path.isdir(array_family_detail)
  print(' Generating C++ header files in:\n "%s"' % array_family)
  generators = (
    scitbx.source_generators.array_family.generate_reductions,
    scitbx.source_generators.array_family.generate_std_imports,
    scitbx.source_generators.array_family.generate_operator_functors,
    scitbx.source_generators.array_family.generate_operator_traits_builtin,
    scitbx.source_generators.array_family.generate_algebras,
    scitbx.source_generators.array_family.generate_apply,
  )
  for generator in generators:
    generator.run(array_family)

# This module is build machinery: it must be imported, never run directly.
assert __name__ != "__main__"
| 49.36
| 90
| 0.863857
| 165
| 1,234
| 6.024242
| 0.242424
| 0.265594
| 0.265594
| 0.325956
| 0.809859
| 0.809859
| 0.753521
| 0.559356
| 0.368209
| 0.126761
| 0
| 0
| 0.062399
| 1,234
| 24
| 91
| 51.416667
| 0.859118
| 0
| 0
| 0
| 1
| 0
| 0.046191
| 0
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.05
| false
| 0
| 0.45
| 0
| 0.5
| 0.1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
dac71f6b74e25bbb097213e3942c9c31a0180c10
| 4,657
|
py
|
Python
|
tests/components/ffmpeg/test_sensor.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/ffmpeg/test_sensor.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/ffmpeg/test_sensor.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""The tests for Open Peer Power ffmpeg binary sensor."""
from unittest.mock import patch
from openpeerpower.setup import setup_component
from tests.common import assert_setup_component, get_test_open_peer_power, mock_coro
class TestFFmpegNoiseSetup:
    """Tests for the ffmpeg_noise binary sensor platform."""

    def setup_method(self):
        """Create a test Open Peer Power instance and a noise-sensor config."""
        self.opp = get_test_open_peer_power()
        noise_platform = {"platform": "ffmpeg_noise", "input": "testinputvideo"}
        self.config = {"binary_sensor": noise_platform}

    def teardown_method(self):
        """Stop the instance created in setup_method."""
        self.opp.stop()

    def test_setup_component(self):
        """Setting up the platform registers ffmpeg data and the entity."""
        with assert_setup_component(1, "binary_sensor"):
            setup_component(self.opp, "binary_sensor", self.config)
            self.opp.block_till_done()

        assert self.opp.data["ffmpeg"].binary == "ffmpeg"
        assert self.opp.states.get("binary_sensor.ffmpeg_noise") is not None

    @patch("haffmpeg.sensor.SensorNoise.open_sensor", return_value=mock_coro())
    def test_setup_component_start(self, mock_start):
        """Starting opp opens the sensor; entity starts out unavailable."""
        with assert_setup_component(1, "binary_sensor"):
            setup_component(self.opp, "binary_sensor", self.config)
            self.opp.block_till_done()

        assert self.opp.data["ffmpeg"].binary == "ffmpeg"
        assert self.opp.states.get("binary_sensor.ffmpeg_noise") is not None

        self.opp.start()
        assert mock_start.called
        noise_state = self.opp.states.get("binary_sensor.ffmpeg_noise")
        assert noise_state.state == "unavailable"

    @patch("haffmpeg.sensor.SensorNoise")
    def test_setup_component_start_callback(self, mock_ffmpeg):
        """Firing the haffmpeg callback flips the entity from off to on."""
        with assert_setup_component(1, "binary_sensor"):
            setup_component(self.opp, "binary_sensor", self.config)
            self.opp.block_till_done()

        assert self.opp.data["ffmpeg"].binary == "ffmpeg"
        assert self.opp.states.get("binary_sensor.ffmpeg_noise") is not None

        self.opp.start()
        noise_state = self.opp.states.get("binary_sensor.ffmpeg_noise")
        assert noise_state.state == "off"

        # Invoke the callback haffmpeg received (second positional arg) with True.
        self.opp.add_job(mock_ffmpeg.call_args[0][1], True)
        self.opp.block_till_done()

        noise_state = self.opp.states.get("binary_sensor.ffmpeg_noise")
        assert noise_state.state == "on"
class TestFFmpegMotionSetup:
    """Tests for the ffmpeg_motion binary sensor platform."""

    def setup_method(self):
        """Create a test Open Peer Power instance and a motion-sensor config."""
        self.opp = get_test_open_peer_power()
        motion_platform = {"platform": "ffmpeg_motion", "input": "testinputvideo"}
        self.config = {"binary_sensor": motion_platform}

    def teardown_method(self):
        """Stop the instance created in setup_method."""
        self.opp.stop()

    def test_setup_component(self):
        """Setting up the platform registers ffmpeg data and the entity."""
        with assert_setup_component(1, "binary_sensor"):
            setup_component(self.opp, "binary_sensor", self.config)
            self.opp.block_till_done()

        assert self.opp.data["ffmpeg"].binary == "ffmpeg"
        assert self.opp.states.get("binary_sensor.ffmpeg_motion") is not None

    @patch("haffmpeg.sensor.SensorMotion.open_sensor", return_value=mock_coro())
    def test_setup_component_start(self, mock_start):
        """Starting opp opens the sensor; entity starts out unavailable."""
        with assert_setup_component(1, "binary_sensor"):
            setup_component(self.opp, "binary_sensor", self.config)
            self.opp.block_till_done()

        assert self.opp.data["ffmpeg"].binary == "ffmpeg"
        assert self.opp.states.get("binary_sensor.ffmpeg_motion") is not None

        self.opp.start()
        assert mock_start.called
        motion_state = self.opp.states.get("binary_sensor.ffmpeg_motion")
        assert motion_state.state == "unavailable"

    @patch("haffmpeg.sensor.SensorMotion")
    def test_setup_component_start_callback(self, mock_ffmpeg):
        """Firing the haffmpeg callback flips the entity from off to on."""
        with assert_setup_component(1, "binary_sensor"):
            setup_component(self.opp, "binary_sensor", self.config)
            self.opp.block_till_done()

        assert self.opp.data["ffmpeg"].binary == "ffmpeg"
        assert self.opp.states.get("binary_sensor.ffmpeg_motion") is not None

        self.opp.start()
        motion_state = self.opp.states.get("binary_sensor.ffmpeg_motion")
        assert motion_state.state == "off"

        # Invoke the callback haffmpeg received (second positional arg) with True.
        self.opp.add_job(mock_ffmpeg.call_args[0][1], True)
        self.opp.block_till_done()

        motion_state = self.opp.states.get("binary_sensor.ffmpeg_motion")
        assert motion_state.state == "on"
| 35.549618
| 85
| 0.660082
| 590
| 4,657
| 4.989831
| 0.132203
| 0.099864
| 0.052989
| 0.065217
| 0.907609
| 0.900815
| 0.887908
| 0.867527
| 0.867527
| 0.867527
| 0
| 0.002738
| 0.215804
| 4,657
| 130
| 86
| 35.823077
| 0.803395
| 0.088254
| 0
| 0.839506
| 0
| 0
| 0.195455
| 0.108134
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.123457
| false
| 0
| 0.037037
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
97428ed78ff404e3b4f763730c0446627db8193c
| 8,698
|
py
|
Python
|
IoT_LTH_source.py
|
BG4Finance/USI_HW
|
8e585c2814e7016eec388d84ba5335e2cfe40f0f
|
[
"MIT"
] | null | null | null |
IoT_LTH_source.py
|
BG4Finance/USI_HW
|
8e585c2814e7016eec388d84ba5335e2cfe40f0f
|
[
"MIT"
] | null | null | null |
IoT_LTH_source.py
|
BG4Finance/USI_HW
|
8e585c2814e7016eec388d84ba5335e2cfe40f0f
|
[
"MIT"
] | null | null | null |
# IoT Light, temperature and humidity sensor for indoor plantation growth
import machine, dht, ssd1306
from time import time, sleep
from machine import I2C, Pin

# Status LEDs driven by plantemp().
green = Pin(12, Pin.OUT)   # PIN 12
red = Pin(15, Pin.OUT)     # PIN 15
yellow = Pin(13, Pin.OUT)  # PIN 13

# BUG FIX: the original read "I2C(‐1, ...)" with a Unicode hyphen (U+2010),
# which is a SyntaxError in Python; it must be the ASCII minus sign.
# id=-1 selects MicroPython's software I2C on the given SCL/SDA pins.
i2c = I2C(-1, Pin(5), Pin(4))  # PIN 5 & 4
i2c.scan()

# 64x48 OLED display used for status messages.
display = ssd1306.SSD1306_I2C(64, 48, i2c)
def plantemp(mint,maxt,minh,maxh):
    """Check the plant environment once and report status.

    Parameters are the acceptable daytime ranges: ``mint``/``maxt`` are the
    minimum/maximum temperature (deg C) and ``minh``/``maxh`` the
    minimum/maximum relative humidity (%).  At night (no light detected)
    the acceptable window is shifted down by 5 deg C and 10 percentage
    points (the ``-5`` / ``-10`` terms in the night branch).

    Status is reported three ways: the green/yellow/red LEDs, print()
    lines on the serial console, and text drawn on the SSD1306 display.
    """
    light = machine.Pin(2,machine.Pin.IN) #PIN 2
    sensor = dht.DHT22(machine.Pin(16)) #PIN 16
    # NOTE(review): sensor.measure() is never called before reading
    # temperature()/humidity(); MicroPython's dht driver normally requires
    # it, so these reads may be stale or invalid -- confirm on hardware.
    # NOTE(review): every display.text() call draws at (0, 0), so lines
    # overwrite each other, and display.show() is never called -- confirm
    # the intended display behaviour.
    if (light.value()==0):
        # Daylight detected: clear all LEDs, then evaluate the readings
        # against the unshifted daytime thresholds.
        green.off()
        yellow.off()
        red.off()
        if (mint<=sensor.temperature()<=maxt and minh<=sensor.humidity()<=maxh):
            # Both temperature and humidity in range.
            green.on()
            print('Everything Perfect')
            print("Sunny,",sensor.temperature(),'°C')
            display.text("Sunny, Everything Perfect", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (sensor.temperature()<mint and minh<=sensor.humidity()<=maxh):
            # Temperature too low, humidity fine.
            red.on()
            print('Low Temperature, please FIX')
            print("Sunny,",sensor.temperature(),'°C')
            display.text("Sunny", 0, 0)
            display.text("Temperature issue, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
        elif (sensor.temperature()>maxt and minh<=sensor.humidity()<=maxh):
            # Temperature too high, humidity fine.
            red.on()
            print('High Temperature, please FIX')
            print("Sunny,",sensor.temperature(),'°C')
            display.text("Sunny", 0, 0)
            display.text("Temperature issue, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
        elif (mint<=sensor.temperature()<=maxt and sensor.humidity()<minh):
            # Humidity too low, temperature fine.
            yellow.on()
            print('Low Humidity, please FIX')
            print("Sunny,",sensor.temperature(),'°C')
            display.text("Sunny", 0, 0)
            display.text("Humidity issue, please FIX", 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (mint<=sensor.temperature()<=maxt and sensor.humidity()>maxh):
            # Humidity too high, temperature fine.
            yellow.on()
            print('High Humidity, please FIX')
            print("Sunny,",sensor.temperature(),'°C')
            display.text("Sunny", 0, 0)
            display.text("Humidity issue, please FIX", 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (sensor.temperature()>maxt and sensor.humidity()<minh):
            # Combined failure: both LEDs on.
            red.on()
            yellow.on()
            print('High Temperature & Low Humidity, please FIX')
            print("Sunny,",sensor.temperature(),'°C')
            display.text("Sunny", 0, 0)
            display.text("T & H issues, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (sensor.temperature()<mint and sensor.humidity()>maxh):
            red.on()
            yellow.on()
            print('Low Temperature & High Humidity, please FIX')
            print("Sunny,",sensor.temperature(),'°C')
            display.text("Sunny", 0, 0)
            display.text("T & H issues, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (sensor.temperature()<mint and sensor.humidity()<minh):
            red.on()
            yellow.on()
            print('Low Temperature & Low Humidity, please FIX')
            print("Sunny,",sensor.temperature(),'°C')
            display.text("Sunny", 0, 0)
            display.text("T & H issues, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (sensor.temperature()>maxt and sensor.humidity()>maxh):
            red.on()
            yellow.on()
            print('High Temperature & High Humidity, please FIX')
            print("Sunny,",sensor.temperature(),'°C')
            display.text("Sunny", 0, 0)
            display.text("T & H issues, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
    else:
        # Night: same ladder, with thresholds shifted by -5 deg C / -10 %.
        # NOTE(review): the LEDs are not cleared in this branch, so a state
        # set during the day can persist into the night -- confirm intent.
        if ((mint-5)<=sensor.temperature()<=(maxt-5) and (minh-10)<=sensor.humidity()<=(maxh-10)):
            green.on()
            display.text("Bed Time, Nice!", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (sensor.temperature()<(mint-5) and (minh-10)<=sensor.humidity()<=(maxh-10)):
            red.on()
            print('Low Temperature, please FIX')
            print("Bed Time, Nice!,",sensor.temperature(),'°C')
            display.text("Bed Time", 0, 0)
            display.text("Temperature issue, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
        elif (sensor.temperature()>(maxt-5) and (minh-10)<=sensor.humidity()<=(maxh-10)):
            red.on()
            print('High Temperature, please FIX')
            print("Bed Time,",sensor.temperature(),'°C')
            display.text("Bed Time", 0, 0)
            display.text("Temperature issue, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
        elif ((mint-5)<=sensor.temperature()<=(maxt-5) and sensor.humidity()<(minh-10)):
            yellow.on()
            print('Low Humidity, please FIX')
            print("Bed Time,",sensor.temperature(),'°C')
            display.text("Bed Time", 0, 0)
            display.text("Humidity issue, please FIX", 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif ((mint-5)<=sensor.temperature()<=(maxt-5) and sensor.humidity()>(maxh-10)):
            yellow.on()
            print('High Humidity, please FIX')
            print("Bed Time,",sensor.temperature(),'°C')
            display.text("Bed Time", 0, 0)
            display.text("Humidity issue, please FIX", 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (sensor.temperature()>(maxt-5) and sensor.humidity()<(minh-10)):
            # Combined failure at night: both LEDs on.
            red.on()
            yellow.on()
            print('High Temperature & Low Humidity, please FIX')
            print("BAD Time,",sensor.temperature(),'°C')
            display.text("BAD Time", 0, 0)
            display.text("T & H issues, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (sensor.temperature()<(mint-5) and sensor.humidity()>(maxh-10)):
            red.on()
            yellow.on()
            print('Low Temperature & High Humidity, please FIX')
            print("BAD Time,",sensor.temperature(),'°C')
            display.text("BAD Time", 0, 0)
            display.text("T & H issues, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (sensor.temperature()<(mint-5) and sensor.humidity()<(minh-10)):
            red.on()
            yellow.on()
            print('Low Temperature & Low Humidity, please FIX')
            print("BAD Time,",sensor.temperature(),'°C')
            display.text("BAD Time", 0, 0)
            display.text("T & H issues, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
        elif (sensor.temperature()>(maxt-5) and sensor.humidity()>(maxh-10)):
            red.on()
            yellow.on()
            print('High Temperature & High Humidity, please FIX')
            print("BAD Time,",sensor.temperature(),'°C')
            display.text("BAD Time", 0, 0)
            display.text("T & H issues, please FIX", 0, 0)
            display.text("T: "+str(sensor.temperature()), 0, 0)
            display.text("H: "+str(sensor.humidity()), 0, 0)
# Main loop: check the plant environment every 5 minutes.
while True: #setting temperature humidity limits and refresh rate
    a = 20  # minimum temperature (deg C)
    b = 25  # maximum temperature (deg C)
    c = 60  # minimum relative humidity (%)
    d = 80  # maximum relative humidity (%)
    plantemp(a, b, c, d)
    # BUG FIX: the module does "from time import time, sleep", so "time"
    # is the time() function and "time.sleep(300)" would raise
    # AttributeError on the first iteration; call the imported sleep().
    sleep(300)
| 50.865497
| 98
| 0.506898
| 1,028
| 8,698
| 4.305447
| 0.082685
| 0.154089
| 0.089471
| 0.129236
| 0.874605
| 0.873701
| 0.872119
| 0.869182
| 0.819024
| 0.802531
| 0
| 0.037347
| 0.325822
| 8,698
| 170
| 99
| 51.164706
| 0.714359
| 0.01851
| 0
| 0.719512
| 0
| 0
| 0.161449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.018293
| null | null | 0.207317
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c1560bd9f52172f8443c2d6f5141bc4661fcb32a
| 87,121
|
py
|
Python
|
Tests/test_Align_bed.py
|
sgalpha01/biopython
|
4ecfd97d38f1dc3d251bd6c84e7ae5d736c87edb
|
[
"BSD-3-Clause"
] | null | null | null |
Tests/test_Align_bed.py
|
sgalpha01/biopython
|
4ecfd97d38f1dc3d251bd6c84e7ae5d736c87edb
|
[
"BSD-3-Clause"
] | 17
|
2021-11-10T10:24:20.000Z
|
2021-11-16T07:35:45.000Z
|
Tests/test_Align_bed.py
|
sgalpha01/biopython
|
4ecfd97d38f1dc3d251bd6c84e7ae5d736c87edb
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2022 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Align.bed module."""
import unittest
import os
import warnings
from io import StringIO
from Bio.Align import Alignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio import BiopythonExperimentalWarning
# Bio.Align.bed is marked experimental; suppress the warning on import.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", BiopythonExperimentalWarning)
    from Bio.Align import bed

# Bio.Align.bed needs numpy; fail early with Biopython's standard
# missing-dependency error instead of a bare ImportError.
try:
    import numpy
except ImportError:
    from Bio import MissingPythonDependencyError

    raise MissingPythonDependencyError(
        "Install numpy if you want to use Bio.Align.bed."
    ) from None
class TestAlign_dna_rna(unittest.TestCase):
    # The BED file dna_rna.bed was generated using this command:
    # pslToBed dna_rna.psl dna_rna.bed

    def setUp(self):
        """Load the chr3 genomic fragments and the RNA query sequences."""
        data = {}
        records = SeqIO.parse("Blat/dna.fa", "fasta")
        for record in records:
            # Record ids look like "chr3:<start>-<end>"; store each
            # fragment keyed by its start coordinate.
            name, start_end = record.id.split(":")
            assert name == "chr3"
            start, end = start_end.split("-")
            start = int(start)
            end = int(end)
            sequence = str(record.seq)
            assert len(sequence) == end - start
            data[start] = sequence
        # Partially-defined sequence: only the parsed fragments are known.
        self.dna = Seq(data, length=198295559)  # hg38 chr3
        records = SeqIO.parse("Blat/rna.fa", "fasta")
        self.rna = {record.id: record.seq for record in records}

    def test_reading(self):
        """Test parsing dna_rna.bed."""
        path = "Blat/dna_rna.bed"
        alignments = bed.AlignmentIterator(path)
        # First alignment: NR_111921.1; both coordinate rows ascend
        # (forward strand).
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertEqual(alignment.shape, (2, 5407))
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr3")
        self.assertEqual(alignment.query.id, "NR_111921.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
# flake8: noqa
                numpy.array([[48663767, 48663813, 48665640,
                              48665722, 48669098, 48669174],
                             [       0,       46,       46,
                                   128,      128,      204]])
                # fmt: on
            )
        )
        # Attach the real sequences so the substitution matrix can be
        # computed from the aligned letters.
        alignment.target.seq = self.dna
        alignment.query.seq = self.rna[alignment.query.id]
        self.assertTrue(
            numpy.array_equal(
                alignment.substitutions,
                # fmt: off
# flake8: noqa
                numpy.array([[53.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
                             [ 0., 35.,  0.,  0.,  0.,  0.,  0.,  0.],
                             [ 0.,  0., 50.,  0.,  0.,  0.,  0.,  0.],
                             [ 0.,  0.,  0., 27.,  0.,  0.,  0.,  0.],
                             [ 9.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
                             [ 0.,  7.,  0.,  0.,  0.,  0.,  0.,  0.],
                             [ 0.,  0., 16.,  0.,  0.,  0.,  0.,  0.],
                             [ 0.,  0.,  0.,  7.,  0.,  0.,  0.,  0.],
                            ])
                # fmt: on
            )
        )
        self.assertEqual(alignment.substitutions.alphabet, "ACGTacgt")
        # Second alignment: NR_046654.1; query row descends (reverse
        # strand, per assertGreater below).
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertEqual(alignment.shape, (2, 1711))
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr3")
        self.assertEqual(alignment.query.id, "NR_046654.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
# flake8: noqa
                numpy.array([[42530895, 42530958, 42532020,
                              42532095, 42532563, 42532606],
                             [     181,      118,      118,
                                    43,       43,        0]])
                # fmt: on
            )
        )
        alignment.target.seq = self.dna
        alignment.query.seq = self.rna[alignment.query.id]
        self.assertTrue(
            numpy.array_equal(
                alignment.substitutions,
                # fmt: off
# flake8: noqa
                numpy.array([[36.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
                             [ 0., 40.,  0.,  0.,  0.,  0.,  0.,  0.],
                             [ 0.,  0., 57.,  0.,  0.,  0.,  0.,  0.],
                             [ 0.,  0.,  0., 42.,  0.,  0.,  0.,  0.],
                             [ 2.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
                             [ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.],
                             [ 0.,  0.,  3.,  0.,  0.,  0.,  0.,  0.],
                             [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
                            ])
                # fmt: on
            )
        )
        self.assertEqual(alignment.substitutions.alphabet, "ACGTacgt")
        # Third alignment: the modified NR_111921.1 variant (forward).
        alignment = next(alignments)
        self.assertEqual(alignment.score, 992)
        self.assertEqual(alignment.shape, (2, 5407))
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr3")
        self.assertEqual(alignment.query.id, "NR_111921.1_modified")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
# flake8: noqa
                numpy.array([[48663767, 48663795, 48663796, 48663813, 48665640,
                              48665716, 48665722, 48669098, 48669174],
                             [       0,       28,       28,       45,       45,
                                   121,      127,      127,      203]])
                # fmt: on
            )
        )
        # The modified RNAs have gaps in their sequence. As this information is
        # not stored in a BED file, we cannot calculate the substitution matrix.
        # Fourth alignment: the modified NR_046654.1 variant (reverse).
        alignment = next(alignments)
        self.assertEqual(alignment.score, 990)
        self.assertEqual(alignment.shape, (2, 1711))
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr3")
        self.assertEqual(alignment.query.id, "NR_046654.1_modified")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
# flake8: noqa
                numpy.array([[42530895, 42530922, 42530958, 42532020, 42532037,
                              42532039, 42532095, 42532563, 42532606],
                             [     179,      152,      116,      116,       99,
                                    99,       43,       43,        0]])
                # fmt: on
            )
        )
        # The file contains exactly four alignments.
        self.assertRaises(StopIteration, next, alignments)

    def test_writing(self):
        """Test writing the alignments in dna_rna.bed."""
        path = "Blat/dna_rna.bed"
        with open(path) as stream:
            original_data = stream.read()
        # Round-trip: re-write the four parsed alignments and compare the
        # output byte-for-byte with the original file.
        alignments = bed.AlignmentIterator(path)
        stream = StringIO()
        writer = bed.AlignmentWriter(stream)
        n = writer.write_file(alignments, mincount=4, maxcount=4)
        self.assertEqual(n, 4)
        stream.seek(0)
        written_data = stream.read()
        stream.close()
        self.assertEqual(original_data, written_data)
class TestAlign_dna(unittest.TestCase):
def test_reading_psl_34_001(self):
"""Test parsing psl_34_001.bed."""
path = "Blat/psl_34_001.bed"
alignments = bed.AlignmentIterator(path)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 16))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr4")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[61646095, 61646111],
[ 0, 16]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 33))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[10271783, 10271816],
[ 0, 33]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 17))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[53575980, 53575997],
[ 17, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 854)
self.assertEqual(alignment.shape, (2, 41))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr9")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[85737865, 85737906],
[ 0, 41]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 41))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr8")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[95160479, 95160520],
[ 0, 41]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 834)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr22")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[42144400, 42144436],
[ 0, 36]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 682)
self.assertEqual(alignment.shape, (2, 44))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[183925984, 183925990, 183926028],
[ 0, 6, 44]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 890)
self.assertEqual(alignment.shape, (2, 170))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[35483340, 35483365, 35483499, 35483510],
[ 0, 25, 25, 36]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr18")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[23891310, 23891349],
[ 0, 39]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 930)
self.assertEqual(alignment.shape, (2, 28))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr18")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[43252217, 43252245],
[ 0, 28]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 912)
self.assertEqual(alignment.shape, (2, 51))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr13")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[52759147, 52759154, 52759160, 52759198],
[ 0, 7, 7, 45]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 50))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[1207056, 1207106],
[ 0, 50]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 824)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[61700837, 61700871],
[ 0, 34]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 572)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr4")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[37558157, 37558167, 37558173, 37558191],
[ 28, 18, 18, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 892)
self.assertEqual(alignment.shape, (2, 37))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr22")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[48997405, 48997442],
[ 37, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 946)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[120641740, 120641776],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[54017130, 54017169],
[ 39, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 848)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[553742, 553781],
[ 39, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 834)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr10")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[99388555, 99388591],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 920)
self.assertEqual(alignment.shape, (2, 25))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr10")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[112178171, 112178196],
[ 25, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 946)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[39368490, 39368526],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 942)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[220325687, 220325721],
[ 34, 0]]),
# fmt: on
)
)
self.assertRaises(StopIteration, next, alignments)
def test_writing_psl_34_001(self):
"""Test writing the alignments in psl_34_001.bed."""
path = "Blat/psl_34_001.bed"
with open(path) as stream:
original_data = stream.read()
alignments = bed.AlignmentIterator(path)
stream = StringIO()
writer = bed.AlignmentWriter(stream)
n = writer.write_file(alignments, mincount=22, maxcount=22)
self.assertEqual(n, 22)
stream.seek(0)
written_data = stream.read()
stream.close()
self.assertEqual(original_data, written_data)
def test_reading_psl_34_003(self):
"""Test parsing psl_34_003.bed."""
path = "Blat/psl_34_003.bed"
alignments = bed.AlignmentIterator(path)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 16))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr4")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[61646095, 61646111],
[ 0, 16]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 33))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[10271783, 10271816],
[ 0, 33]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 17))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[53575980, 53575997],
[ 17, 0]]),
# fmt: on
)
)
self.assertRaises(StopIteration, next, alignments)
def test_writing_psl_34_003(self):
"""Test writing the alignments in psl_34_003.bed."""
path = "Blat/psl_34_003.bed"
with open(path) as stream:
original_data = stream.read()
alignments = bed.AlignmentIterator(path)
stream = StringIO()
writer = bed.AlignmentWriter(stream)
n = writer.write_file(alignments, mincount=3, maxcount=3)
self.assertEqual(n, 3)
stream.seek(0)
written_data = stream.read()
stream.close()
self.assertEqual(original_data, written_data)
def test_reading_psl_34_004(self):
"""Test parsing psl_34_004.bed."""
path = "Blat/psl_34_004.bed"
alignments = bed.AlignmentIterator(path)
alignment = next(alignments)
self.assertEqual(alignment.score, 854)
self.assertEqual(alignment.shape, (2, 41))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr9")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[85737865, 85737906],
[ 0, 41]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 41))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr8")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[95160479, 95160520],
[ 0, 41]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 834)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr22")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[42144400, 42144436],
[ 0, 36]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 682)
self.assertEqual(alignment.shape, (2, 44))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[183925984, 183925990, 183926028],
[ 0, 6, 44]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 890)
self.assertEqual(alignment.shape, (2, 170))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[35483340, 35483365, 35483499, 35483510],
[ 0, 25, 25, 36]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr18")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[23891310, 23891349],
[ 0, 39]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 930)
self.assertEqual(alignment.shape, (2, 28))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr18")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[43252217, 43252245],
[ 0, 28]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 912)
self.assertEqual(alignment.shape, (2, 51))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr13")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[52759147, 52759154, 52759160, 52759198],
[ 0, 7, 7, 45]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 50))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[1207056, 1207106],
[ 0, 50]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 824)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[61700837, 61700871],
[ 0, 34]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 572)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr4")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[37558157, 37558167, 37558173, 37558191],
[ 28, 18, 18, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 892)
self.assertEqual(alignment.shape, (2, 37))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr22")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[48997405, 48997442],
[ 37, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 946)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[120641740, 120641776],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[54017130, 54017169],
[ 39, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 848)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[553742, 553781],
[ 39, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 834)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr10")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[99388555, 99388591],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 920)
self.assertEqual(alignment.shape, (2, 25))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr10")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[112178171, 112178196],
[ 25, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 946)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[39368490, 39368526],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 942)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[220325687, 220325721],
[ 34, 0]]),
# fmt: on
)
)
self.assertRaises(StopIteration, next, alignments)
def test_writing_psl_34_004(self):
"""Test writing the alignments in psl_34_004.bed."""
path = "Blat/psl_34_004.bed"
with open(path) as stream:
original_data = stream.read()
alignments = bed.AlignmentIterator(path)
stream = StringIO()
writer = bed.AlignmentWriter(stream)
n = writer.write_file(alignments, mincount=19, maxcount=19)
self.assertEqual(n, 19)
stream.seek(0)
written_data = stream.read()
stream.close()
self.assertEqual(original_data, written_data)
def test_reading_psl_34_005(self):
"""Test parsing psl_34_005.bed."""
path = "Blat/psl_34_005.bed"
alignments = bed.AlignmentIterator(path)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 16))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr4")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[61646095, 61646111],
[ 0, 16]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 33))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[10271783, 10271816],
[ 0, 33]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 17))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[53575980, 53575997],
[ 17, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 854)
self.assertEqual(alignment.shape, (2, 41))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr9")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[85737865, 85737906],
[ 0, 41]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 41))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr8")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[95160479, 95160520],
[ 0, 41]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 834)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr22")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[42144400, 42144436],
[ 0, 36]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 682)
self.assertEqual(alignment.shape, (2, 44))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[183925984, 183925990, 183926028],
[ 0, 6, 44]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 890)
self.assertEqual(alignment.shape, (2, 170))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[35483340, 35483365, 35483499, 35483510],
[ 0, 25, 25, 36]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr18")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[23891310, 23891349],
[ 0, 39]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 930)
self.assertEqual(alignment.shape, (2, 28))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr18")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[43252217, 43252245],
[ 0, 28]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 912)
self.assertEqual(alignment.shape, (2, 51))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr13")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[52759147, 52759154, 52759160, 52759198],
[ 0, 7, 7, 45]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 50))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[1207056, 1207106],
[ 0, 50]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 824)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[61700837, 61700871],
[ 0, 34]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 572)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr4")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[37558157, 37558167, 37558173, 37558191],
[ 28, 18, 18, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 892)
self.assertEqual(alignment.shape, (2, 37))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr22")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[48997405, 48997442],
[ 37, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 946)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[120641740, 120641776],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[54017130, 54017169],
[ 39, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 848)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[553742, 553781],
[ 39, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 834)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr10")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[99388555, 99388591],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 920)
self.assertEqual(alignment.shape, (2, 25))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr10")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[112178171, 112178196],
[ 25, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 946)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
numpy.array([[39368490, 39368526],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 942)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[220325687, 220325721],
[ 34, 0]]),
# fmt: on
)
)
self.assertRaises(StopIteration, next, alignments)
def test_writing_psl_34_005(self):
"""Test writing the alignments in psl_34_005.bed."""
path = "Blat/psl_34_005.bed"
with open(path) as stream:
original_data = stream.read()
alignments = bed.AlignmentIterator(path)
stream = StringIO()
writer = bed.AlignmentWriter(stream)
n = writer.write_file(alignments, mincount=22, maxcount=22)
self.assertEqual(n, 22)
stream.seek(0)
written_data = stream.read()
stream.close()
self.assertEqual(original_data, written_data)
class TestAlign_dnax_prot(unittest.TestCase):
    """Tests for BED files derived from translated-DNA vs. protein BLAT runs.

    Covers psl_35_001.bed and psl_35_002.bed; every alignment in both files
    has the protein query CAG33136.1.
    """

    def test_reading_psl_35_001(self):
        """Test parsing psl_35_001.bed."""
        path = "Blat/psl_35_001.bed"
        alignments = bed.AlignmentIterator(path)
        # alignment 1: chr13, single block, both rows ascending (forward/forward)
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75566694, 75566850],
                             [       0,      156]]),
                # fmt: on
            )
        )
        # alignment 2: chr13, single block
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75560749, 75560881],
                             [       0,      132]]),
                # fmt: on
            )
        )
        # alignment 3: chr13, two blocks separated by a target-only gap
        alignment = next(alignments)
        self.assertEqual(alignment.score, 986)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75549820, 75549865, 75567225, 75567312],
                             [       0,       45,       45,      132]]),
                # fmt: on
            )
        )
        # alignment 4: chr13, two blocks
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75604767, 75604827, 75605728, 75605809],
                             [       0,       60,       60,      141]]),
                # fmt: on
            )
        )
        # alignment 5: chr13, single block
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75594914, 75594989],
                             [       0,       75]]),
                # fmt: on
            )
        )
        # alignment 6: chr13, single block
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75569459, 75569507],
                             [       0,       48]]),
                # fmt: on
            )
        )
        # alignment 7: chr4, single block
        alignment = next(alignments)
        self.assertEqual(alignment.score, 530)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr4")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[41260685, 41260787],
                             [       0,      102]]),
                # fmt: on
            )
        )
        # alignment 8 (last): chr4, two blocks
        alignment = next(alignments)
        self.assertEqual(alignment.score, 166)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr4")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[41257605, 41257731, 41263227, 41263290],
                             [       0,      126,      126,      189]]),
                # fmt: on
            )
        )
        # The file holds exactly eight alignments.
        self.assertRaises(StopIteration, next, alignments)

    def test_writing_psl_35_001(self):
        """Test writing the alignments in psl_35_001.bed."""
        path = "Blat/psl_35_001.bed"
        # Round trip: re-serializing the parsed alignments must reproduce
        # the original file byte for byte.
        with open(path) as stream:
            original_data = stream.read()
        alignments = bed.AlignmentIterator(path)
        stream = StringIO()
        writer = bed.AlignmentWriter(stream)
        n = writer.write_file(alignments, mincount=8, maxcount=8)
        self.assertEqual(n, 8)
        stream.seek(0)
        written_data = stream.read()
        stream.close()
        self.assertEqual(original_data, written_data)

    def test_reading_psl_35_002(self):
        """Test parsing psl_35_002.bed."""
        path = "Blat/psl_35_002.bed"
        alignments = bed.AlignmentIterator(path)
        # alignment 1: KI537979, seven blocks, both rows ascending
        alignment = next(alignments)
        self.assertEqual(alignment.score, 972)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "KI537979")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[9712654, 9712786, 9715941, 9716097, 9716445, 9716532, 9718374,
                              9718422, 9739264, 9739339, 9743706, 9743766, 9744511, 9744592],
                             [      0,     132,     132,     288,     288,     375,     375,
                                  423,     423,     498,     498,     558,     558,     639]]),
                # fmt: on
            )
        )
        # alignment 2: KI538594, two blocks with a 1-base overlap on the target
        alignment = next(alignments)
        self.assertEqual(alignment.score, 792)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "KI538594")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[2103463, 2103523, 2103522, 2104149],
                             [      0,      60,      60,     687]]),
                # fmt: on
            )
        )
        # alignment 3 (last): KI537194, query on the reverse strand
        # (query row descends, hence assertGreater below)
        alignment = next(alignments)
        self.assertEqual(alignment.score, 902)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "KI537194")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[20872390, 20872471, 20872472, 20873021],
                             [     630,      549,      549,        0]]),
                # fmt: on
            )
        )
        # The file holds exactly three alignments.
        self.assertRaises(StopIteration, next, alignments)

    def test_writing_psl_35_002(self):
        """Test writing the alignments in psl_35_002.bed."""
        path = "Blat/psl_35_002.bed"
        # Round trip: re-serializing the parsed alignments must reproduce
        # the original file byte for byte.
        with open(path) as stream:
            original_data = stream.read()
        alignments = bed.AlignmentIterator(path)
        stream = StringIO()
        writer = bed.AlignmentWriter(stream)
        n = writer.write_file(alignments, mincount=3, maxcount=3)
        self.assertEqual(n, 3)
        stream.seek(0)
        written_data = stream.read()
        stream.close()
        self.assertEqual(original_data, written_data)
class TestAlign_bed12(unittest.TestCase):
    """Tests reading and writing the same two alignments in BED3–BED12.

    Each bedN.bed file carries the columns available at that BED level, so
    per-level assertions (score, name, strand, thickStart/End, itemRgb,
    block structure) are guarded by ``bedN`` thresholds.
    """

    def test_reading(self):
        """Test parsing alignments in file formats BED3 through BED12."""
        for bedN in (3, 4, 5, 6, 7, 8, 9, 12):
            filename = "bed%d.bed" % bedN
            path = os.path.join("Blat", filename)
            alignments = bed.AlignmentIterator(path)
            # First record: mRNA1 on chr22, forward strand.
            alignment = next(alignments)
            if bedN >= 5:
                # The score column first appears in BED5.
                self.assertEqual(alignment.score, 960, msg=filename)
            self.assertEqual(alignment.shape, (2, 4000), msg=filename)
            self.assertLess(
                alignment.coordinates[0, 0], alignment.coordinates[0, -1], msg=filename
            )
            self.assertLess(
                alignment.coordinates[1, 0], alignment.coordinates[1, -1], msg=filename
            )
            self.assertEqual(len(alignment), 2, msg=filename)
            self.assertIs(alignment.sequences[0], alignment.target, msg=filename)
            self.assertIs(alignment.sequences[1], alignment.query, msg=filename)
            self.assertEqual(alignment.target.id, "chr22", msg=filename)
            if bedN >= 4:
                # The name column first appears in BED4.
                self.assertEqual(alignment.query.id, "mRNA1", msg=filename)
            else:
                self.assertIsNone(alignment.query.id, msg=filename)
            if bedN == 12:
                # Only BED12 carries block sizes/starts, so only there does
                # the alignment split into two exons.
                self.assertTrue(
                    numpy.array_equal(
                        alignment.coordinates,
                        # fmt: off
                        # flake8: noqa
                        numpy.array([[1000, 1567, 4512, 5000],
                                     [   0,  567,  567, 1055]]),
                        # fmt: on
                    ),
                    msg=filename,
                )
            else:
                self.assertTrue(
                    numpy.array_equal(
                        alignment.coordinates,
                        numpy.array([[1000, 5000], [0, 4000]]),
                    ),
                    msg=filename,
                )
            if bedN >= 7:
                self.assertEqual(alignment.thickStart, 1200, msg=filename)
            if bedN >= 8:
                self.assertEqual(alignment.thickEnd, 5900, msg=filename)
            if bedN >= 9:
                self.assertEqual(alignment.itemRgb, "255,0,0", msg=filename)
            # Second record: mRNA2 on chr22, reverse strand from BED6 up.
            alignment = next(alignments)
            if bedN >= 5:
                self.assertEqual(alignment.score, 900, msg=filename)
            self.assertEqual(alignment.shape, (2, 4000), msg=filename)
            self.assertLess(
                alignment.coordinates[0, 0], alignment.coordinates[0, -1], msg=filename
            )
            if bedN >= 6:
                # The strand column first appears in BED6; with it, the
                # query coordinates run backwards.
                self.assertGreater(
                    alignment.coordinates[1, 0],
                    alignment.coordinates[1, -1],
                    msg=filename,
                )
            else:
                self.assertLess(
                    alignment.coordinates[1, 0],
                    alignment.coordinates[1, -1],
                    msg=filename,
                )
            self.assertEqual(len(alignment), 2, msg=filename)
            self.assertIs(alignment.sequences[0], alignment.target, msg=filename)
            self.assertIs(alignment.sequences[1], alignment.query, msg=filename)
            self.assertEqual(alignment.target.id, "chr22", msg=filename)
            if bedN >= 4:
                self.assertEqual(alignment.query.id, "mRNA2", msg=filename)
            else:
                self.assertIsNone(alignment.query.id, msg=filename)
            if bedN == 12:
                self.assertTrue(
                    numpy.array_equal(
                        alignment.coordinates,
                        # fmt: off
                        # flake8: noqa
                        numpy.array([[2000, 2433, 5601, 6000],
                                     [ 832,  399,  399,    0]])
                        # fmt: on
                    ),
                    msg=filename,
                )
            elif bedN >= 6:
                self.assertTrue(
                    numpy.array_equal(
                        alignment.coordinates,
                        numpy.array([[2000, 6000], [4000, 0]]),
                    ),
                    msg=filename,
                )
            else:
                self.assertTrue(
                    numpy.array_equal(
                        alignment.coordinates,
                        numpy.array([[2000, 6000], [0, 4000]]),
                    ),
                    msg=filename,
                )
            if bedN >= 7:
                self.assertEqual(alignment.thickStart, 2300, msg=filename)
            if bedN >= 8:
                self.assertEqual(alignment.thickEnd, 5960, msg=filename)
            if bedN >= 9:
                self.assertEqual(alignment.itemRgb, "0,255,0", msg=filename)
            # Each file holds exactly two records, so the iterator must now
            # be exhausted.  (The previous version bound an unused ``cm``
            # and placed a never-reached ``self.fail`` with a placeholder-
            # free f-string inside the ``with`` block.)
            with self.assertRaises(StopIteration, msg=filename):
                next(alignments)

    def test_writing(self):
        """Test writing the alignments in bed12.bed as BED3 through BED12."""
        for bedN in (3, 4, 5, 6, 7, 8, 9, 12):
            filename = "bed%d.bed" % bedN
            path = os.path.join("Blat", filename)
            # Round trip: re-serializing at the same BED level must
            # reproduce the original file byte for byte.
            with open(path) as stream:
                original_data = stream.read()
            alignments = bed.AlignmentIterator(path)
            stream = StringIO()
            writer = bed.AlignmentWriter(stream, bedN=bedN)
            n = writer.write_file(alignments, mincount=2, maxcount=2)
            self.assertEqual(n, 2, msg=filename)
            stream.seek(0)
            written_data = stream.read()
            stream.close()
            self.assertEqual(original_data, written_data, msg=filename)
if __name__ == "__main__":
    # Run the whole suite with per-test output (verbosity 2).
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| 43.538731
| 95
| 0.562241
| 8,600
| 87,121
| 5.658023
| 0.050698
| 0.131939
| 0.162272
| 0.102345
| 0.948663
| 0.94556
| 0.942313
| 0.940545
| 0.93438
| 0.924104
| 0
| 0.07777
| 0.317972
| 87,121
| 2,000
| 96
| 43.5605
| 0.741143
| 0.042022
| 0
| 0.8026
| 0
| 0
| 0.018697
| 0
| 0
| 0
| 0
| 0
| 0.508865
| 1
| 0.010047
| false
| 0
| 0.007683
| 0
| 0.020095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c19a21c85a851e3fd146a13a062404b9e87628e8
| 40,128
|
py
|
Python
|
lib/jnpr/healthbot/swagger/api/workflow_api.py
|
Juniper/healthbot-py-client
|
49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6
|
[
"Apache-2.0"
] | 10
|
2019-10-23T12:54:37.000Z
|
2022-02-07T19:24:30.000Z
|
lib/jnpr/healthbot/swagger/api/workflow_api.py
|
Juniper/healthbot-py-client
|
49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6
|
[
"Apache-2.0"
] | 5
|
2019-09-30T04:29:25.000Z
|
2022-02-16T12:21:06.000Z
|
lib/jnpr/healthbot/swagger/api/workflow_api.py
|
Juniper/healthbot-py-client
|
49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6
|
[
"Apache-2.0"
] | 4
|
2019-09-30T01:17:48.000Z
|
2020-08-25T07:27:54.000Z
|
# coding: utf-8
"""
Paragon Insights APIs
API interface for PI application # noqa: E501
OpenAPI spec version: 4.0.0
Contact: healthbot-feedback@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from jnpr.healthbot.swagger.api_client import ApiClient
class WorkflowApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Bind this API facade to an ApiClient, creating a default one if needed."""
    # Use the caller-supplied client when given; otherwise build a default.
    self.api_client = ApiClient() if api_client is None else api_client
def create_healthbot_workflow_workflow_by_id(self, workflow_name, workflow, **kwargs):  # noqa: E501
    """Create workflow by ID.  # noqa: E501

    Create operation of resource: workflow.  Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead and fetch the
    result with ``thread.get()``.  # noqa: E501

    :param async_req bool
    :param str workflow_name: ID of workflow-name (required)
    :param WorkflowSchema workflow: workflowbody object (required)
    :param str x_iam_token: authentication header object
    :return: None, or the request thread when called asynchronously.
    """
    # This convenience wrapper always unwraps the response to its body.
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike simply pass the delegate's result through.
    delegate = self.create_healthbot_workflow_workflow_by_id_with_http_info
    return delegate(workflow_name, workflow, **kwargs)
def create_healthbot_workflow_workflow_by_id_with_http_info(self, workflow_name, workflow, **kwargs):  # noqa: E501
    """Create workflow by ID  # noqa: E501

    Create operation of resource: workflow  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_healthbot_workflow_workflow_by_id_with_http_info(workflow_name, workflow, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str workflow_name: ID of workflow-name (required)
    :param WorkflowSchema workflow: workflowbody object (required)
    :param str x_iam_token: authentication header object
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint accepts: its own parameters plus the generic
    # request-control options forwarded to ApiClient.call_api.
    all_params = ['workflow_name', 'workflow', 'x_iam_token']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then merge **kwargs into it, rejecting
    # any keyword this endpoint does not understand.  NOTE: keep this
    # locals() call immediately after all_params — later locals would leak
    # into the snapshot.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_healthbot_workflow_workflow_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'workflow_name' is set
    if ('workflow_name' not in params or
            params['workflow_name'] is None):
        raise ValueError("Missing the required parameter `workflow_name` when calling `create_healthbot_workflow_workflow_by_id`")  # noqa: E501
    # verify the required parameter 'workflow' is set
    if ('workflow' not in params or
            params['workflow'] is None):
        raise ValueError("Missing the required parameter `workflow` when calling `create_healthbot_workflow_workflow_by_id`")  # noqa: E501

    collection_formats = {}

    # Substituted into the /config/workflow/{workflow_name}/ URL template.
    path_params = {}
    if 'workflow_name' in params:
        path_params['workflow_name'] = params['workflow_name']  # noqa: E501

    query_params = []

    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The workflow schema object travels as the JSON request body.
    body_params = None
    if 'workflow' in params:
        body_params = params['workflow']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/workflow/{workflow_name}/', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_healthbot_workflows_workflow_by_id(self, workflows, **kwargs):  # noqa: E501
    """Create workflow by ID.  # noqa: E501

    Create/Update multiple workflows. The new content for the existing workflows updates the existing content and the new workflows are created.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead and fetch the result with ``thread.get()``.

    :param async_req bool
    :param WorkflowsSchema workflows: workflowbody object (required)
    :param str x_iam_token: authentication header object
    :return: None, or the request thread when called asynchronously.
    """
    # This convenience wrapper always unwraps the response to its body.
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike simply pass the delegate's result through.
    delegate = self.create_healthbot_workflows_workflow_by_id_with_http_info
    return delegate(workflows, **kwargs)
def create_healthbot_workflows_workflow_by_id_with_http_info(self, workflows, **kwargs):  # noqa: E501
    """Create workflow by ID  # noqa: E501

    Create/Update multiple workflows. The new content for the existing workflows updates the existing content and the new workflows are created.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_healthbot_workflows_workflow_by_id_with_http_info(workflows, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param WorkflowsSchema workflows: workflowbody object (required)
    :param str x_iam_token: authentication header object
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint accepts: its own parameters plus the generic
    # request-control options forwarded to ApiClient.call_api.
    all_params = ['workflows', 'x_iam_token']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then merge **kwargs into it, rejecting
    # unknown keywords.  Keep locals() immediately after all_params.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_healthbot_workflows_workflow_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'workflows' is set
    if ('workflows' not in params or
            params['workflows'] is None):
        raise ValueError("Missing the required parameter `workflows` when calling `create_healthbot_workflows_workflow_by_id`")  # noqa: E501

    collection_formats = {}

    # Collection endpoint: no URL substitutions needed.
    path_params = {}

    query_params = []

    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The workflows schema object travels as the JSON request body.
    body_params = None
    if 'workflows' in params:
        body_params = params['workflows']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/workflows/', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_healthbot_workflow_workflow_by_id(self, workflow_name, **kwargs):  # noqa: E501
    """Delete workflow by ID.  # noqa: E501

    Delete operation of resource: workflow.  Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead and fetch the
    result with ``thread.get()``.  # noqa: E501

    :param async_req bool
    :param str workflow_name: ID of workflow-name (required)
    :param str x_iam_token: authentication header object
    :return: None, or the request thread when called asynchronously.
    """
    # This convenience wrapper always unwraps the response to its body.
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike simply pass the delegate's result through.
    delegate = self.delete_healthbot_workflow_workflow_by_id_with_http_info
    return delegate(workflow_name, **kwargs)
def delete_healthbot_workflow_workflow_by_id_with_http_info(self, workflow_name, **kwargs):  # noqa: E501
    """Delete workflow by ID  # noqa: E501

    Delete operation of resource: workflow  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_healthbot_workflow_workflow_by_id_with_http_info(workflow_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str workflow_name: ID of workflow-name (required)
    :param str x_iam_token: authentication header object
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint accepts: its own parameters plus the generic
    # request-control options forwarded to ApiClient.call_api.
    all_params = ['workflow_name', 'x_iam_token']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then merge **kwargs into it, rejecting
    # unknown keywords.  Keep locals() immediately after all_params.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_healthbot_workflow_workflow_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'workflow_name' is set
    if ('workflow_name' not in params or
            params['workflow_name'] is None):
        raise ValueError("Missing the required parameter `workflow_name` when calling `delete_healthbot_workflow_workflow_by_id`")  # noqa: E501

    collection_formats = {}

    # Substituted into the /config/workflow/{workflow_name}/ URL template.
    path_params = {}
    if 'workflow_name' in params:
        path_params['workflow_name'] = params['workflow_name']  # noqa: E501

    query_params = []

    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # DELETE carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/workflow/{workflow_name}/', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_healthbot_workflows_workflow_by_id(self, **kwargs):  # noqa: E501
    """Delete workflow by ID.  # noqa: E501

    Delete operation of resource: workflow.  Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead and fetch the
    result with ``thread.get()``.  # noqa: E501

    :param async_req bool
    :param str x_iam_token: authentication header object
    :return: None, or the request thread when called asynchronously.
    """
    # This convenience wrapper always unwraps the response to its body.
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike simply pass the delegate's result through.
    delegate = self.delete_healthbot_workflows_workflow_by_id_with_http_info
    return delegate(**kwargs)
def delete_healthbot_workflows_workflow_by_id_with_http_info(self, **kwargs):  # noqa: E501
    """Delete workflow by ID  # noqa: E501

    Delete operation of resource: workflow  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_healthbot_workflows_workflow_by_id_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str x_iam_token: authentication header object
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint accepts: its own parameters plus the generic
    # request-control options forwarded to ApiClient.call_api.
    all_params = ['x_iam_token']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then merge **kwargs into it, rejecting
    # unknown keywords.  Keep locals() immediately after all_params.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_healthbot_workflows_workflow_by_id" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    # Collection endpoint: no URL substitutions needed.
    path_params = {}

    query_params = []

    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # DELETE carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/workflows/', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def retrieve_healthbot_workflow_workflow(self, **kwargs):  # noqa: E501
    """Retrieve workflow.  # noqa: E501

    Retrieve operation of resource: workflow.  Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead and fetch the
    result with ``thread.get()``.  # noqa: E501

    :param async_req bool
    :param str x_iam_token: authentication header object
    :param bool working: true queries undeployed configuration
    :return: list[str], or the request thread when called asynchronously.
    """
    # This convenience wrapper always unwraps the response to its body.
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike simply pass the delegate's result through.
    delegate = self.retrieve_healthbot_workflow_workflow_with_http_info
    return delegate(**kwargs)
def retrieve_healthbot_workflow_workflow_with_http_info(self, **kwargs):  # noqa: E501
    """Retrieve workflow  # noqa: E501

    Retrieve operation of resource: workflow  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.retrieve_healthbot_workflow_workflow_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str x_iam_token: authentication header object
    :param bool working: true queries undeployed configuration
    :return: list[str]
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keywords this endpoint accepts: its own parameters plus the generic
    # request-control options forwarded to ApiClient.call_api.
    all_params = ['x_iam_token', 'working']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace, then merge **kwargs into it, rejecting
    # unknown keywords.  Keep locals() immediately after all_params.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method retrieve_healthbot_workflow_workflow" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}

    # `working` is the only query-string parameter.
    query_params = []
    if 'working' in params:
        query_params.append(('working', params['working']))  # noqa: E501

    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # GET carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/workflow/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[str]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def retrieve_healthbot_workflow_workflow_by_id(self, workflow_name, **kwargs):  # noqa: E501
    """Retrieve workflow by ID.  # noqa: E501

    Retrieve operation of resource: workflow.  Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead and fetch the
    result with ``thread.get()``.  # noqa: E501

    :param async_req bool
    :param str workflow_name: ID of workflow-name (required)
    :param str x_iam_token: authentication header object
    :param bool working: true queries undeployed configuration
    :return: WorkflowSchema, or the request thread when called asynchronously.
    """
    # This convenience wrapper always unwraps the response to its body.
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike simply pass the delegate's result through.
    delegate = self.retrieve_healthbot_workflow_workflow_by_id_with_http_info
    return delegate(workflow_name, **kwargs)
def retrieve_healthbot_workflow_workflow_by_id_with_http_info(self, workflow_name, **kwargs):  # noqa: E501
    """Retrieve workflow by ID  # noqa: E501

    Retrieve operation of resource: workflow  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.retrieve_healthbot_workflow_workflow_by_id_with_http_info(workflow_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str workflow_name: ID of workflow-name (required)
    :param str x_iam_token: authentication header object
    :param bool working: true queries undeployed configuration
    :return: WorkflowSchema
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: `params = locals()` below snapshots the local names, so the
    # variable names in this (generated) method are load-bearing.
    all_params = ['workflow_name', 'x_iam_token', 'working']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown kwargs, then merge the known ones into `params`.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method retrieve_healthbot_workflow_workflow_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'workflow_name' is set
    if ('workflow_name' not in params or
            params['workflow_name'] is None):
        raise ValueError("Missing the required parameter `workflow_name` when calling `retrieve_healthbot_workflow_workflow_by_id`")  # noqa: E501

    collection_formats = {}

    # Path parameter: substituted into /config/workflow/{workflow_name}/.
    path_params = {}
    if 'workflow_name' in params:
        path_params['workflow_name'] = params['workflow_name']  # noqa: E501

    # Optional query string: ?working=... selects undeployed configuration.
    query_params = []
    if 'working' in params:
        query_params.append(('working', params['working']))  # noqa: E501

    # Optional authentication header.
    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/workflow/{workflow_name}/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='WorkflowSchema',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def retrieve_healthbot_workflows_workflow_by_id(self, **kwargs):  # noqa: E501
    """Retrieve workflow by ID  # noqa: E501

    Thin convenience wrapper around
    ``retrieve_healthbot_workflows_workflow_by_id_with_http_info`` that
    returns only the response data. Synchronous by default; pass
    ``async_req=True`` to get the request thread instead.

    :param async_req bool
    :param str x_iam_token: authentication header object
    :param bool working: true queries undeployed configuration
    :return: WorkflowsSchema
             If the method is called asynchronously,
             returns the request thread.
    """
    # Sync and async branches both return the helper's result unchanged.
    kwargs['_return_http_data_only'] = True
    return self.retrieve_healthbot_workflows_workflow_by_id_with_http_info(**kwargs)  # noqa: E501
def retrieve_healthbot_workflows_workflow_by_id_with_http_info(self, **kwargs):  # noqa: E501
    """Retrieve workflow by ID  # noqa: E501

    Retrieve operation of resource: workflow  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.retrieve_healthbot_workflows_workflow_by_id_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str x_iam_token: authentication header object
    :param bool working: true queries undeployed configuration
    :return: WorkflowsSchema
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: `params = locals()` below snapshots the local names, so the
    # variable names in this (generated) method are load-bearing.
    all_params = ['x_iam_token', 'working']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown kwargs, then merge the known ones into `params`.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method retrieve_healthbot_workflows_workflow_by_id" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    # No path parameters: collection endpoint /config/workflows/.
    path_params = {}

    # Optional query string: ?working=... selects undeployed configuration.
    query_params = []
    if 'working' in params:
        query_params.append(('working', params['working']))  # noqa: E501

    # Optional authentication header.
    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/workflows/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='WorkflowsSchema',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_healthbot_workflow_workflow_by_id(self, workflow_name, workflow, **kwargs):  # noqa: E501
    """Update workflow by ID  # noqa: E501

    Thin convenience wrapper around
    ``update_healthbot_workflow_workflow_by_id_with_http_info`` that
    returns only the response data. Synchronous by default; pass
    ``async_req=True`` to get the request thread instead.

    :param async_req bool
    :param str workflow_name: ID of workflow-name (required)
    :param WorkflowSchema workflow: workflowbody object (required)
    :param str x_iam_token: authentication header object
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Sync and async branches both return the helper's result unchanged.
    kwargs['_return_http_data_only'] = True
    return self.update_healthbot_workflow_workflow_by_id_with_http_info(workflow_name, workflow, **kwargs)  # noqa: E501
def update_healthbot_workflow_workflow_by_id_with_http_info(self, workflow_name, workflow, **kwargs):  # noqa: E501
    """Update workflow by ID  # noqa: E501

    Update operation of resource: workflow  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_healthbot_workflow_workflow_by_id_with_http_info(workflow_name, workflow, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str workflow_name: ID of workflow-name (required)
    :param WorkflowSchema workflow: workflowbody object (required)
    :param str x_iam_token: authentication header object
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: `params = locals()` below snapshots the local names, so the
    # variable names in this (generated) method are load-bearing.
    all_params = ['workflow_name', 'workflow', 'x_iam_token']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown kwargs, then merge the known ones into `params`.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_healthbot_workflow_workflow_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'workflow_name' is set
    if ('workflow_name' not in params or
            params['workflow_name'] is None):
        raise ValueError("Missing the required parameter `workflow_name` when calling `update_healthbot_workflow_workflow_by_id`")  # noqa: E501
    # verify the required parameter 'workflow' is set
    if ('workflow' not in params or
            params['workflow'] is None):
        raise ValueError("Missing the required parameter `workflow` when calling `update_healthbot_workflow_workflow_by_id`")  # noqa: E501

    collection_formats = {}

    # Path parameter: substituted into /config/workflow/{workflow_name}/.
    path_params = {}
    if 'workflow_name' in params:
        path_params['workflow_name'] = params['workflow_name']  # noqa: E501

    query_params = []

    # Optional authentication header.
    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The workflow schema object is sent as the PUT request body.
    body_params = None
    if 'workflow' in params:
        body_params = params['workflow']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/workflow/{workflow_name}/', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_healthbot_workflows_workflow_by_id(self, workflows, **kwargs):  # noqa: E501
    """Update workflow by ID  # noqa: E501

    Thin convenience wrapper around
    ``update_healthbot_workflows_workflow_by_id_with_http_info`` that
    returns only the response data. Synchronous by default; pass
    ``async_req=True`` to get the request thread instead.

    :param async_req bool
    :param WorkflowsSchema workflows: workflowbody object (required)
    :param str x_iam_token: authentication header object
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Sync and async branches both return the helper's result unchanged.
    kwargs['_return_http_data_only'] = True
    return self.update_healthbot_workflows_workflow_by_id_with_http_info(workflows, **kwargs)  # noqa: E501
def update_healthbot_workflows_workflow_by_id_with_http_info(self, workflows, **kwargs):  # noqa: E501
    """Update workflow by ID  # noqa: E501

    Update operation of resource: workflow  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_healthbot_workflows_workflow_by_id_with_http_info(workflows, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param WorkflowsSchema workflows: workflowbody object (required)
    :param str x_iam_token: authentication header object
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: `params = locals()` below snapshots the local names, so the
    # variable names in this (generated) method are load-bearing.
    all_params = ['workflows', 'x_iam_token']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject unknown kwargs, then merge the known ones into `params`.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_healthbot_workflows_workflow_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'workflows' is set
    if ('workflows' not in params or
            params['workflows'] is None):
        raise ValueError("Missing the required parameter `workflows` when calling `update_healthbot_workflows_workflow_by_id`")  # noqa: E501

    collection_formats = {}

    # No path parameters: collection endpoint /config/workflows/.
    path_params = {}

    query_params = []

    # Optional authentication header.
    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The workflows schema object is sent as the PUT request body.
    body_params = None
    if 'workflows' in params:
        body_params = params['workflows']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/workflows/', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 41.540373
| 162
| 0.631728
| 4,610
| 40,128
| 5.209328
| 0.038829
| 0.050302
| 0.039975
| 0.026983
| 0.979138
| 0.979138
| 0.978055
| 0.974266
| 0.974266
| 0.970518
| 0
| 0.016065
| 0.283318
| 40,128
| 965
| 163
| 41.58342
| 0.818978
| 0.335227
| 0
| 0.832685
| 1
| 0
| 0.196782
| 0.058104
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036965
| false
| 0
| 0.007782
| 0
| 0.099222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c1e44d2be6ffa25b34750a68514f29f5aa8f2aa4
| 7,087
|
py
|
Python
|
Testes_automatizados/test_cifra_de_vigenere.py
|
GregorioFornetti/CriPython
|
12b5bc64188de3a7c81d8f9d0a392f6edc7ac827
|
[
"MIT"
] | 1
|
2020-05-17T03:00:18.000Z
|
2020-05-17T03:00:18.000Z
|
Testes_automatizados/test_cifra_de_vigenere.py
|
GregorioFornetti/Cripythongrafia
|
12b5bc64188de3a7c81d8f9d0a392f6edc7ac827
|
[
"MIT"
] | 1
|
2020-05-17T15:59:26.000Z
|
2020-05-17T15:59:26.000Z
|
Testes_automatizados/test_cifra_de_vigenere.py
|
GregorioFornetti/Cripythongrafia
|
12b5bc64188de3a7c81d8f9d0a392f6edc7ac827
|
[
"MIT"
] | null | null | null |
from Cifras import cifra_de_vigenere
import dicionarios
# OPÇÃO: APENAS LETRAS
def test_cifra_de_vigenere_apenas_letras_chave_1():
    """Single-letter key 'b': every letter shifts by one; decrypt reverses it."""
    chave, claro, cifrado = ['b'], 'abcdef', 'bcdefg'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, claro) == cifrado
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, cifrado) == claro
def test_cifra_de_vigenere_apenas_letras_chave_2():
    """Multi-letter key 'bca' cycles per character; round trip restores text."""
    chave, claro, cifrado = ['bca'], 'abcdef', 'bdcegf'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, claro) == cifrado
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, cifrado) == claro
def test_cifra_de_vigenere_apenas_letras_chave_invalida_vazia():
    """An empty key must yield the key-error message in both directions."""
    chave, mensagem = [''], 'abc'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, mensagem) == dicionarios.retorna_erro_chave()
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, mensagem) == dicionarios.retorna_erro_chave()
def test_cifra_de_vigenere_apenas_letras_chave_invalida_caractere_especial():
    """A key containing a non-ASCII letter ('é') must be rejected."""
    chave, mensagem = ['abcé'], 'abc'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, mensagem) == dicionarios.retorna_erro_chave()
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, mensagem) == dicionarios.retorna_erro_chave()
def test_cifra_de_vigenere_apenas_letras_mensagem_invalida():
    """An empty message must yield the message-error result either way."""
    chave, mensagem = ['abc'], ''
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, mensagem) == dicionarios.retorna_erro_mensagem()
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, mensagem) == dicionarios.retorna_erro_mensagem()
def test_cifra_de_vigenere_apenas_letras_maiusc_minusc():
    """Case of the message is preserved; key case is irrelevant to the shift."""
    chave, claro, cifrado = ['AbCdE'], 'aBcDe', 'aCeGi'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, claro) == cifrado
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, cifrado) == claro
def test_cifra_de_vigenere_apenas_letras_caracteres_especiais():
    """Punctuation, spaces and accented letters pass through unencrypted."""
    chave, claro, cifrado = ['bcd'], 'abc ! ? áé .,', 'bdf ! ? áé .,'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, claro) == cifrado
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, cifrado) == claro
def test_cifra_de_vigenere_apenas_letras_ignorar_andamento_chave_quando_encontrar_caract_especial():
    """Non-letters must not advance the key position: both 'abc' runs encrypt identically."""
    chave, claro, cifrado = ['abc'], 'abc !í.,áé abc', 'ace !í.,áé ace'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, claro) == cifrado
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, cifrado) == claro
def test_cifra_de_vigenere_apenas_letras_volta():
    """Shifts past 'z' wrap around to the start of the alphabet."""
    chave, claro, cifrado = ['bc'], 'zy yz', 'aa zb'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, claro) == cifrado
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, cifrado) == claro
def test_cifra_de_vigenere_apenas_letras_chave_maior():
    """A key longer than a word still cycles correctly across the message."""
    chave, claro, cifrado = ['flx'], 'oi alo', 'tt xqz'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, claro) == cifrado
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, cifrado) == claro
def test_cifra_de_vigenere_apenas_letras_texto_1():
    """Full-sentence round trip; spaces, 'ã' and '!' pass through untouched."""
    chave = ['ataque']
    claro = 'Vamos invadir a base deles amanhã !'
    cifrado = 'Vtmem mnoatcv a uaiy heeei uqaghã !'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, claro) == cifrado
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, cifrado) == claro
def test_cifra_de_vigenere_apenas_letras_texto_2():
    """Full-sentence round trip with key 'covid' (mirrors texto_1)."""
    # BUG FIX: the original placed the `==` comparison *inside* the argument
    # list -- e.g. encriptar(['covid'], 'Cuidado...' == 'Eidldfc...') -- so the
    # function received False as the message and the assert only checked the
    # truthiness of the returned value. The test could never fail for the
    # intended reason. The comparison now checks the function's return value.
    chave = ['covid']
    claro = 'Cuidado para não se contaminar !'
    cifrado = 'Eidldfc kiuc bãj ah ecibdowiiu !'
    assert cifra_de_vigenere.encriptar_modo_apenas_letras(chave, claro) == cifrado
    assert cifra_de_vigenere.traduzir_modo_apenas_letras(chave, cifrado) == claro
# OPÇÃO: VÁRIOS CARACTERES
def test_cifra_de_vigenere_varios_caracteres_chave_1():
    # Single-char key '!': each character shifts by one code point; decrypt reverses.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['!'], 'abcde') == 'bcdef'
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['!'], 'bcdef') == 'abcde'
def test_cifra_de_vigenere_varios_caracteres_chave_2():
    # Multi-char key '!"#' cycles per character of the message.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['!"#'], 'abcde') == 'bdfeg'
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['!"#'], 'bdfeg') == 'abcde'
def test_cifra_de_vigenere_varios_caracteres_chave_invalida_vazia():
    # An empty key must yield the key-error message in both directions.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres([''], 'abc') == dicionarios.retorna_erro_chave()
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres([''], 'abc') == dicionarios.retorna_erro_chave()
def test_cifra_de_vigenere_varios_caracteres_chave_invalida_acima_do_limite():
    # Key character '˟' (U+02DF) lies outside the supported range and must be rejected.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['˟'], 'abc') == dicionarios.retorna_erro_chave()
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['˟'], 'abc') == dicionarios.retorna_erro_chave()
def test_cifra_de_vigenere_varios_caracteres_mensagem_invalida():
    # An empty message must yield the message-error result either way.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['abc'], '') == dicionarios.retorna_erro_mensagem()
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['abc'], '') == dicionarios.retorna_erro_mensagem()
def test_cifra_de_vigenere_varios_caracteres_mensagem_acima_do_limite():
    # Message characters outside the supported range pass through unchanged.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['abc'], '˟˟˟˟') == '˟˟˟˟'
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['abc'], '˟˟˟˟') == '˟˟˟˟'
def test_cifra_de_vigenere_varios_caracteres_maiusc_minusc():
    # In this mode case matters: 'A' and 'a' shift by different amounts.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['Aa'], 'AaaA') == 'bŤ¤'
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['Aa'], 'bŤ¤') == 'AaaA'
def test_cifra_de_vigenere_varios_caracteres_verificar_andamento_com_caractere_acima_do_limite():
    # Out-of-range characters ('˟') must not advance the key position.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['!"'], 'a˟a˟a˟a') == 'b˟c˟b˟c'
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['!"'], 'b˟c˟b˟c') == 'a˟a˟a˟a'
def test_cifra_de_vigenere_varios_caracteres_volta():
    # Shifts past the top of the supported range wrap back to its start.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['!"'], '˞˝') == '  '
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['!"'], '  ') == '˞˝'
def test_cifra_de_vigenere_varios_caracteres_chave_grande():
    # Key characters near the top of the range produce large shifts with wrap-around.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['˜˝'], ' !"#') == '˜˞˞!'
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['˜˝'], '˜˞˞!') == ' !"#'
def test_cifra_de_vigenere_varios_caracteres_texto_1():
    # Full-sentence round trip: in this mode spaces and punctuation are
    # encrypted too, producing extended-Latin ciphertext.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['testando'],
        'Vamos ver como que essa cifra está funcionando ! Será que está trocando tudo ?'
        ) == 'ÍÉãæ×nÝ×éeÙæÑàdãìÍsÜ×äÈoÚÑÜéÅnÌåëĦsÝÙßÊÛæÖ×åÈàdpt»ÛéĢnØçÜeÛêØįdæé×ÙØÒÕÖoëÝÚæa°'
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['testando'],
        'ÍÉãæ×nÝ×éeÙæÑàdãìÍsÜ×äÈoÚÑÜéÅnÌåëĦsÝÙßÊÛæÖ×åÈàdpt»ÛéĢnØçÜeÛêØįdæé×ÙØÒÕÖoëÝÚæa°'
        ) == 'Vamos ver como que essa cifra está funcionando ! Será que está trocando tudo ?'
def test_cifra_de_vigenere_varios_caracteres_texto_2():
    # Round trip with a key that itself contains spaces, accents and '!'.
    assert cifra_de_vigenere.encriptar_modo_varios_caracteres(['cháves diférentes!'],
        'Vamos testar agora com uma chave diferente, com espaços e acentos.'
        ) == '¼ÌĮèÛstÌßÝĪçeÒÞ×èbcÎİæeëmÈiÌıÖÞÖtÌßgËÝĦçÜÛ,dÏØĶrÍäçÉĺpÙhĦvÉÙeÕàØļ¢'
    assert cifra_de_vigenere.traduzir_modo_varios_caracteres(['cháves diférentes!'],
        '¼ÌĮèÛstÌßÝĪçeÒÞ×èbcÎİæeëmÈiÌıÖÞÖtÌßgËÝĦçÜÛ,dÏØĶrÍäçÉĺpÙhĦvÉÙeÕàØļ¢'
        ) == 'Vamos testar agora com uma chave diferente, com espaços e acentos.'
| 62.166667
| 113
| 0.773952
| 969
| 7,087
| 5.26935
| 0.145511
| 0.100078
| 0.214454
| 0.197415
| 0.91559
| 0.884058
| 0.856052
| 0.803956
| 0.511359
| 0.315511
| 0
| 0.001253
| 0.099337
| 7,087
| 113
| 114
| 62.716814
| 0.787091
| 0.00635
| 0
| 0
| 0
| 0
| 0.195766
| 0.040915
| 0
| 0
| 0
| 0
| 0.55814
| 1
| 0.27907
| true
| 0
| 0.023256
| 0
| 0.302326
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
de12e6de57333e46835b5fa9a7e529f68d3c1b72
| 584
|
py
|
Python
|
1-1 Braille Translation/test.py
|
srp-dev/foobar.withgoogle
|
d33ea17f74674afcc3be30ba1b171fc77b5e3018
|
[
"MIT"
] | null | null | null |
1-1 Braille Translation/test.py
|
srp-dev/foobar.withgoogle
|
d33ea17f74674afcc3be30ba1b171fc77b5e3018
|
[
"MIT"
] | null | null | null |
1-1 Braille Translation/test.py
|
srp-dev/foobar.withgoogle
|
d33ea17f74674afcc3be30ba1b171fc77b5e3018
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from solution import solution
# Table-driven checks: each entry pairs an input string with its expected
# Braille bit-string from solution().
test_cases = [
    ("code",
     "100100101010100110100010"),
    ("Braille",
     "000001110000111010100000010100111000111000100010"),
    ("The quick brown fox jumps over the lazy dog",
     "000001011110110010100010000000111110101001010100100100101000000000110000111010101010010111101110000000110100101010101101000000010110101001101100111100011100000000101010111001100010111010000000011110110010100010000000111000100000101011101111000000100110101010110110"),
]
for number, (text, expected) in enumerate(test_cases, start=1):
    print("TEST CASE %d" % number)
    assert solution(text) == expected
| 53.090909
| 333
| 0.863014
| 39
| 584
| 12.923077
| 0.666667
| 0.053571
| 0.077381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.615245
| 0.056507
| 584
| 11
| 333
| 53.090909
| 0.299456
| 0.027397
| 0
| 0
| 0
| 0
| 0.744718
| 0.591549
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0
| true
| 0
| 0.142857
| 0
| 0.142857
| 0.428571
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
de15706540dee4217337c432835273e7c3f9771f
| 309
|
py
|
Python
|
examples/utils-shell.py
|
breddy31/bandit1
|
dab37aace404db7f368084d9a97164e1f4d98fbf
|
[
"Apache-2.0"
] | 1
|
2018-03-08T17:32:42.000Z
|
2018-03-08T17:32:42.000Z
|
examples/utils-shell.py
|
breddy31/bandit1
|
dab37aace404db7f368084d9a97164e1f4d98fbf
|
[
"Apache-2.0"
] | null | null | null |
examples/utils-shell.py
|
breddy31/bandit1
|
dab37aace404db7f368084d9a97164e1f4d98fbf
|
[
"Apache-2.0"
] | null | null | null |
import utils
import utils as u
# NOTE(review): this file is a Bandit example fixture — the shell=True calls
# below are *deliberately* insecure inputs for the shell-injection plugin and
# must not be "fixed". Do not convert them to argument lists.
u.execute('/bin/gcc --version', shell=True)
utils.execute('/bin/gcc --version', shell=True)
u.execute_with_timeout('/bin/gcc --version', shell=True)
utils.execute_with_timeout('/bin/gcc --version', shell=True)
# The argv-list + shell=False form is the safe variant Bandit should not flag.
utils.execute_with_timeout(['/bin/gcc', '--version'], shell=False)
| 34.333333
| 66
| 0.734628
| 47
| 309
| 4.702128
| 0.276596
| 0.135747
| 0.294118
| 0.40724
| 0.855204
| 0.855204
| 0.723982
| 0.570136
| 0.570136
| 0.570136
| 0
| 0
| 0.071197
| 309
| 8
| 67
| 38.625
| 0.770035
| 0
| 0
| 0
| 0
| 0
| 0.288026
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e7332edd8404c850488f70e15317990fd7f2c7bd
| 129
|
py
|
Python
|
gadio/models/__init__.py
|
rabbitism/GadioVideo
|
d230c21a9d06eb262e044fbe4b2ed24636290fe2
|
[
"MIT"
] | 194
|
2019-04-12T01:44:55.000Z
|
2022-03-24T06:32:49.000Z
|
gadio/models/__init__.py
|
rabbitism/GadioVideo
|
d230c21a9d06eb262e044fbe4b2ed24636290fe2
|
[
"MIT"
] | 15
|
2019-05-03T02:31:46.000Z
|
2022-02-27T03:49:35.000Z
|
gadio/models/__init__.py
|
rabbitism/GadioVideo
|
d230c21a9d06eb262e044fbe4b2ed24636290fe2
|
[
"MIT"
] | 17
|
2019-04-26T03:26:29.000Z
|
2021-12-27T06:51:40.000Z
|
from gadio.models.asset import *
from gadio.models.page import *
from gadio.models.radio import *
from gadio.models.user import *
| 32.25
| 32
| 0.790698
| 20
| 129
| 5.1
| 0.4
| 0.352941
| 0.588235
| 0.617647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 129
| 4
| 33
| 32.25
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e7a0932b60d690edf0b58259c23bb6176cc2b6e7
| 5,497
|
py
|
Python
|
migrations/versions/c2d74790a74e_.py
|
Maethorin/concept2
|
05d64805388126a6fb1c412a3fe290b5fa7ed117
|
[
"MIT"
] | null | null | null |
migrations/versions/c2d74790a74e_.py
|
Maethorin/concept2
|
05d64805388126a6fb1c412a3fe290b5fa7ed117
|
[
"MIT"
] | null | null | null |
migrations/versions/c2d74790a74e_.py
|
Maethorin/concept2
|
05d64805388126a6fb1c412a3fe290b5fa7ed117
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: c2d74790a74e
Revises: dda6cfde5752
Create Date: 2016-03-14 15:03:07.746469
"""
# revision identifiers, used by Alembic.
revision = 'c2d74790a74e'  # this migration's ID
down_revision = 'dda6cfde5752'  # parent migration in the chain
from alembic import op
import app
import sqlalchemy as sa
def upgrade():
    """Make the listed columns NOT NULL and add guest-name columns.

    Auto-generated by Alembic; rewritten data-driven but issuing the exact
    same ``op`` calls in the exact same order.
    """
    required_before_add = [
        ('atletas', 'celular', sa.VARCHAR(length=11)),
        ('atletas', 'cpf', sa.VARCHAR(length=11)),
        ('atletas', 'email', sa.VARCHAR()),
        ('atletas', 'nascimento', sa.DATE()),
        ('atletas', 'nome', sa.VARCHAR()),
        ('atletas', 'senha_hash', sa.VARCHAR(length=128)),
        ('atletas', 'sexo', sa.VARCHAR(length=2)),
        ('atletas', 'sobrenome', sa.VARCHAR()),
        ('categorias', 'nome', sa.VARCHAR()),
        ('eventos', 'data_fim', sa.DATE()),
        ('eventos', 'data_inicio', sa.DATE()),
        ('eventos', 'resumo', sa.TEXT()),
        ('eventos', 'slug', sa.VARCHAR()),
        ('eventos', 'titulo', sa.VARCHAR()),
    ]
    for table, column, existing in required_before_add:
        op.alter_column(table, column,
                        existing_type=existing,
                        nullable=False)
    # New optional guest-name columns on inscricoes.
    op.add_column('inscricoes', sa.Column('nome_convidado', sa.String(length=120), nullable=True))
    op.add_column('inscricoes', sa.Column('nome_segundo_convidado', sa.String(length=120), nullable=True))
    required_after_add = [
        ('onde_remar', 'endereco', sa.VARCHAR()),
        ('onde_remar', 'nome', sa.VARCHAR()),
        ('onde_remar', 'telefone', sa.VARCHAR()),
        ('produtos', 'nome', sa.VARCHAR()),
        ('provas', 'distancia', sa.INTEGER()),
        ('sub_categorias', 'nome', sa.VARCHAR()),
    ]
    for table, column, existing in required_after_add:
        op.alter_column(table, column,
                        existing_type=existing,
                        nullable=False)
def downgrade():
    """Revert: relax the NOT NULL constraints and drop the guest-name columns.

    Auto-generated by Alembic; rewritten data-driven but issuing the exact
    same ``op`` calls in the exact same (reverse-of-upgrade) order.
    """
    relax_before_drop = [
        ('sub_categorias', 'nome', sa.VARCHAR()),
        ('provas', 'distancia', sa.INTEGER()),
        ('produtos', 'nome', sa.VARCHAR()),
        ('onde_remar', 'telefone', sa.VARCHAR()),
        ('onde_remar', 'nome', sa.VARCHAR()),
        ('onde_remar', 'endereco', sa.VARCHAR()),
    ]
    for table, column, existing in relax_before_drop:
        op.alter_column(table, column,
                        existing_type=existing,
                        nullable=True)
    # Remove the guest-name columns added by upgrade().
    op.drop_column('inscricoes', 'nome_segundo_convidado')
    op.drop_column('inscricoes', 'nome_convidado')
    relax_after_drop = [
        ('eventos', 'titulo', sa.VARCHAR()),
        ('eventos', 'slug', sa.VARCHAR()),
        ('eventos', 'resumo', sa.TEXT()),
        ('eventos', 'data_inicio', sa.DATE()),
        ('eventos', 'data_fim', sa.DATE()),
        ('categorias', 'nome', sa.VARCHAR()),
        ('atletas', 'sobrenome', sa.VARCHAR()),
        ('atletas', 'sexo', sa.VARCHAR(length=2)),
        ('atletas', 'senha_hash', sa.VARCHAR(length=128)),
        ('atletas', 'nome', sa.VARCHAR()),
        ('atletas', 'nascimento', sa.DATE()),
        ('atletas', 'email', sa.VARCHAR()),
        ('atletas', 'cpf', sa.VARCHAR(length=11)),
        ('atletas', 'celular', sa.VARCHAR(length=11)),
    ]
    for table, column, existing in relax_after_drop:
        op.alter_column(table, column,
                        existing_type=existing,
                        nullable=True)
| 36.646667
| 106
| 0.571221
| 572
| 5,497
| 5.305944
| 0.145105
| 0.092257
| 0.171334
| 0.207578
| 0.902142
| 0.882702
| 0.875124
| 0.849094
| 0.823064
| 0.814168
| 0
| 0.01763
| 0.298345
| 5,497
| 149
| 107
| 36.892617
| 0.769251
| 0.052938
| 0
| 0.916031
| 0
| 0
| 0.137251
| 0.008506
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015267
| false
| 0
| 0.022901
| 0
| 0.038168
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e7a53dfd3a5bce69d42bac2816c10d614821c199
| 360
|
py
|
Python
|
terrascript/data/triton.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/data/triton.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/data/triton.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/data/triton.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:29:19 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.data.triton
#
# instead of
#
# >>> import terrascript.data.joyent.triton
#
# This is only available for 'official' and 'partner' providers.
from terrascript.data.joyent.triton import *
| 24
| 73
| 0.730556
| 49
| 360
| 5.367347
| 0.714286
| 0.228137
| 0.159696
| 0.205323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038835
| 0.141667
| 360
| 14
| 74
| 25.714286
| 0.812298
| 0.802778
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e7bd959a5fa8aa5db32d8008db09f47883ba0a66
| 1,834
|
py
|
Python
|
uttut/pipeline/tests/mock_factory.py
|
Yoctol/uttut
|
31ed12449d38fac58f50178c4ade8b011f1fcfbd
|
[
"MIT"
] | 2
|
2018-03-27T03:03:37.000Z
|
2018-05-23T05:49:34.000Z
|
uttut/pipeline/tests/mock_factory.py
|
Yoctol/uttut
|
31ed12449d38fac58f50178c4ade8b011f1fcfbd
|
[
"MIT"
] | 125
|
2018-04-06T14:07:36.000Z
|
2019-12-19T03:54:19.000Z
|
uttut/pipeline/tests/mock_factory.py
|
Yoctol/uttut
|
31ed12449d38fac58f50178c4ade8b011f1fcfbd
|
[
"MIT"
] | null | null | null |
from typing import List
from ..ops.base import Operator, LabelAligner
class MockLabelAligner(LabelAligner):
    # Test double for LabelAligner: both directions are identity maps, so
    # pipeline tests can check wiring without any real label arithmetic.

    def _transform(self, labels):
        """Forward direction: return labels unchanged."""
        return labels

    def _inverse_transform(self, labels):
        """Inverse direction: return labels unchanged."""
        return labels
class Str2Str(Operator):
    """Mock str -> str operator: passes its input through unchanged."""

    _input_type = str
    _output_type = str

    def __init__(self, **kwargs):
        # Keep the construction kwargs so tests can inspect them.
        self.kwargs = kwargs

    def _transform(self, input_sequence: str):  # type: ignore
        """Return the input untouched plus a no-op label aligner."""
        aligner = MockLabelAligner(
            edit={},
            input_sequence=input_sequence,
            output_length=len(input_sequence),
        )
        return input_sequence, aligner
class Lst2Lst(Operator):
    """Mock list -> list operator: passes its input through unchanged."""

    _input_type = list
    _output_type = list

    def __init__(self, **kwargs):
        # Keep the construction kwargs so tests can inspect them.
        self.kwargs = kwargs

    def _transform(self, input_sequence: List[str]):  # type: ignore
        """Return the input untouched plus a no-op label aligner."""
        aligner = MockLabelAligner(
            edit={},
            input_sequence=input_sequence,
            output_length=len(input_sequence),
        )
        return input_sequence, aligner
class Str2Lst(Operator):
    """Mock str -> list operator: splits the string into its characters."""

    _input_type = str
    _output_type = list

    def __init__(self, **kwargs):
        # Keep the construction kwargs so tests can inspect them.
        self.kwargs = kwargs

    def _transform(self, input_sequence: str):  # type: ignore
        """Return the character list plus a no-op label aligner."""
        chars = list(input_sequence)
        aligner = MockLabelAligner(
            edit={},
            input_sequence=input_sequence,
            output_length=len(chars),
        )
        return chars, aligner
class Lst2Str(Operator):
    """Mock list -> str operator: concatenates the tokens into one string."""

    _input_type = list
    _output_type = str

    def __init__(self, **kwargs):
        # Keep the construction kwargs so tests can inspect them.
        self.kwargs = kwargs

    def _transform(self, input_sequence: List[str]):  # type: ignore
        """Return the concatenated string plus a no-op label aligner."""
        joined = ''.join(input_sequence)
        aligner = MockLabelAligner(
            edit={},
            input_sequence=input_sequence,
            output_length=len(joined),
        )
        return joined, aligner
| 23.21519
| 68
| 0.633588
| 190
| 1,834
| 5.763158
| 0.168421
| 0.213699
| 0.073059
| 0.0621
| 0.863927
| 0.781735
| 0.706849
| 0.706849
| 0.706849
| 0.706849
| 0
| 0.003026
| 0.279171
| 1,834
| 78
| 69
| 23.512821
| 0.825265
| 0.027808
| 0
| 0.716981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.188679
| false
| 0
| 0.037736
| 0.075472
| 0.584906
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
e7c8a98783324c37f5de43c7f8f84c399bf7c356
| 100
|
py
|
Python
|
app/oauth/__init__.py
|
jkedra/flask-dance-multi-provider
|
4a2dc0a3dcd61af8fe62c0af20d1afe7d44e9667
|
[
"MIT"
] | 12
|
2019-04-02T09:58:14.000Z
|
2021-03-28T12:00:47.000Z
|
app/oauth/__init__.py
|
jkedra/flask-dance-multi-provider
|
4a2dc0a3dcd61af8fe62c0af20d1afe7d44e9667
|
[
"MIT"
] | 1
|
2019-05-03T12:06:42.000Z
|
2019-05-03T12:06:42.000Z
|
app/oauth/__init__.py
|
jkedra/flask-dance-multi-provider
|
4a2dc0a3dcd61af8fe62c0af20d1afe7d44e9667
|
[
"MIT"
] | 6
|
2019-05-03T09:42:45.000Z
|
2021-02-17T01:58:55.000Z
|
from .github import blueprint as github_blueprint
from .google import blueprint as google_blueprint
| 33.333333
| 49
| 0.86
| 14
| 100
| 6
| 0.428571
| 0.357143
| 0.404762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 100
| 2
| 50
| 50
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
99bed8a89cdde11436914cb98553a54875e67f3f
| 68
|
py
|
Python
|
src/nashpy/integer_pivoting/__init__.py
|
katiemcgoldrick/Nashpy
|
352747a8970e24a1dcc27f16c68f3eedf62f28fa
|
[
"MIT"
] | 212
|
2016-11-06T12:44:08.000Z
|
2022-03-10T03:05:27.000Z
|
src/nashpy/integer_pivoting/__init__.py
|
katiemcgoldrick/Nashpy
|
352747a8970e24a1dcc27f16c68f3eedf62f28fa
|
[
"MIT"
] | 93
|
2016-11-06T12:34:14.000Z
|
2022-03-25T10:57:17.000Z
|
src/nashpy/integer_pivoting/__init__.py
|
katiemcgoldrick/Nashpy
|
352747a8970e24a1dcc27f16c68f3eedf62f28fa
|
[
"MIT"
] | 51
|
2016-11-06T12:31:22.000Z
|
2022-03-29T10:45:53.000Z
|
from .integer_pivoting import *
from .integer_pivoting_lex import *
| 22.666667
| 35
| 0.823529
| 9
| 68
| 5.888889
| 0.555556
| 0.415094
| 0.716981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 68
| 2
| 36
| 34
| 0.883333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
99d1d6883868e0b332ffcb473bfdc3818a9c5db3
| 7,317
|
py
|
Python
|
src/Test.py
|
Fzaero/Deep-Effect-Trajectory-Prediction-in-Robot-Manipulation
|
dab30a9f4f404b42835f545e0488305bc1bc76da
|
[
"MIT"
] | 2
|
2020-05-15T23:32:24.000Z
|
2021-02-03T15:00:41.000Z
|
src/Test.py
|
Fzaero/Deep-Effect-Trajectory-Prediction-in-Robot-Manipulation
|
dab30a9f4f404b42835f545e0488305bc1bc76da
|
[
"MIT"
] | 2
|
2020-05-11T17:51:05.000Z
|
2020-05-19T23:10:29.000Z
|
src/Test.py
|
Fzaero/Deep-Effect-Trajectory-Prediction-in-Robot-Manipulation
|
dab30a9f4f404b42835f545e0488305bc1bc76da
|
[
"MIT"
] | 1
|
2019-07-31T02:45:32.000Z
|
2019-07-31T02:45:32.000Z
|
#!/usr/bin/env python
from os import listdir
from os.path import isfile, join
from keras.models import load_model
from keras.preprocessing import image
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from numpy import array
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM,Dropout
import keras
import copy
import math
import numpy as np
import scipy.stats as stats
import pylab as pl
from sklearn.externals import joblib
from Prepare_Data import *
dataset_path='trajectories/'  # root folder holding the recorded trajectories
scalers_path='scalers/'  # folder with the saved sklearn scaler .save files
num_of_Data=1857  # total number of recorded samples
# NOTE(review): range(1,1857) yields 1856 names ('1.jpg'..'1856.jpg'), one
# fewer than num_of_Data — confirm whether range(1,1858) was intended.
onlyfiles2 = [str(s) + '.jpg' for s in range(1,1857)]
def_traj_lenght=74  # default trajectory length (spelling kept for compatibility)
started_moving_threshold=0.0000001  # displacement above which the object counts as moving
object_slip_threshold=0.03  # displacement above which the object counts as slipped
## TODO: SEPERATE DATA PREPARATION AND RUNNING MODEL
def validate_model(modelNumber, features, FeatureSize, seq_length, model):
    """Roll the trained model forward over the validation slice and report
    the end-point XYZ error.

    Parameters
    ----------
    modelNumber : int
        1 = features appended to the state vector, 2 = features as a second
        model input, 3 = image as a second model input.
    features, FeatureSize :
        Feature-selection settings; also select which saved feature scaler
        is loaded from ``scalers_path``.
    seq_length : int
        Length of the sliding input window fed to the model.
    model :
        Trained Keras model used for step-by-step prediction.

    Returns
    -------
    (mean_error, std_error * 100) over the 100 validation trajectories
    (data indexes 1400:1500).
    """
    scaler_filename = scalers_path + "scaler_x.save"
    scaler_x = joblib.load(scaler_filename)
    trajectoryOrij = list()   # ground-truth trajectories
    trajectory = list()       # predicted trajectories, filled step by step
    val_len = 100             # size of the validation slice
    if modelNumber == 3:
        series, _, _, _, indexes = get_data_for_model(modelNumber, features, FeatureSize)
        sample = np.zeros([val_len, seq_length, 6])
        sample2 = np.zeros([val_len, 128, 128, 1])
        max_traj_len = 0
        trajectoryIndexVal = indexes[1400:1500]
        for ii in range(val_len):
            i = trajectoryIndexVal[ii]
            imageAddres = 'images/' + str(i + 1) + '.jpg'
            trajectoryOrij.append(series[i])
            trajectory.append(np.zeros_like(series[i]))
            sample2[ii, :, :, :] = image.img_to_array(
                image.load_img(imageAddres, color_mode="grayscale").resize((128, 128)))
            if len(series[i]) > max_traj_len:
                max_traj_len = len(series[i])
            # Seed the whole window with the first state of the trajectory.
            for t in range(seq_length):
                sample[ii, t, :6] = scaler_x.transform(series[i][0:1, :])
        # Binarize the grayscale images (pixels > 127 become 1).
        sample2 = (sample2.astype('float32') / 255. / 0.5).astype('int32')
        for t in range(max_traj_len - 1):
            result = model.predict([sample2, sample], batch_size=16, verbose=0)
            for i in range(val_len):
                if t < seq_length - 1:
                    # Window not yet full: write prediction into slot t+1.
                    if t < len(trajectoryOrij[i]) - 1:
                        trajectory[i][t + 1, :] = scaler_x.inverse_transform(result[i:i + 1, t, :])
                        sample[i, t + 1, :6] = result[i, t, :]
                else:
                    # Window full: shift left and append the newest prediction.
                    if t < len(trajectoryOrij[i]) - 1:
                        trajectory[i][t + 1, :] = scaler_x.inverse_transform(result[i:i + 1, -1, :])
                        sample[i, :seq_length - 1, :] = sample[i, 1:seq_length, :]
                        sample[i, seq_length - 1:seq_length, :6] = result[i, -1, :]
    else:
        scaler_filename = scalers_path + "scaler_f" + str(features) + "_fs" + str(FeatureSize) + ".save"
        scaler_features = joblib.load(scaler_filename)
        series, selected_features, _, _, indexes = get_data_for_model(modelNumber, features, FeatureSize)
        trajectoryIndexVal = indexes[1400:1500]
        max_traj_len = 0
        if modelNumber == 1:
            sample = np.zeros([val_len, seq_length, FeatureSize + 6])
        if modelNumber == 2:
            sample = np.zeros([val_len, seq_length, 6])
            sample2 = np.zeros([val_len, seq_length, FeatureSize])
        for ii in range(val_len):
            i = trajectoryIndexVal[ii]
            trajectoryOrij.append(series[i])
            trajectory.append(np.zeros_like(series[i]))
            if len(series[i]) > max_traj_len:
                max_traj_len = len(series[i])
            for t in range(seq_length):
                sample[ii, t, :6] = scaler_x.transform(series[i][0:1, :])
                if modelNumber == 1:
                    sample[ii, t, 6:] = scaler_features.transform(selected_features[i:i + 1, :])
                if modelNumber == 2:
                    sample2[ii, t, :] = scaler_features.transform(selected_features[i:i + 1, :])
        for t in range(max_traj_len - 1):
            if modelNumber == 1:
                result = model.predict(sample, batch_size=100, verbose=0)[:, -1, :]
            if modelNumber == 2:
                # BUGFIX: was model.predict({sample, sample2}, ...) — a set
                # literal of numpy arrays, which raises TypeError (arrays are
                # unhashable) and has no defined order. Keras multi-input
                # predict takes a list; order (sample, sample2) assumed to
                # match the model inputs — TODO confirm against training code.
                result = model.predict([sample, sample2], batch_size=100, verbose=0)[:, -1, :]
            for i in range(val_len):
                if t < len(trajectoryOrij[i]) - 1:
                    trajectory[i][t + 1, :] = scaler_x.inverse_transform(result[i:i + 1, :])
                    sample[i, :seq_length - 1, :] = sample[i, 1:seq_length, :]
                    sample[i, seq_length - 1:seq_length, :6] = result[i, :]
    errorsxyz = list()
    for ii in range(val_len):
        # Euclidean distance between true and predicted final XYZ position.
        errorsxyz.append(math.sqrt(sum((trajectoryOrij[ii][-1, :3] - trajectory[ii][-1, :3]) ** 2)))
    print('Total XYZ MSE Error' + ' Mean=' + str(np.mean(errorsxyz) * 100)[:4] + ' Std=' + str(np.std(errorsxyz) * 100)[:4])
    return np.mean(errorsxyz), np.std(errorsxyz) * 100
def test_model(modelNumber, features, FeatureSize, seq_length, model):
    """Roll the trained model forward over the test slice and report the
    end-point XYZ error, returning the trajectories as well.

    Same rollout scheme as validate_model but over data indexes 1500:1750
    (250 trajectories) and with the trajectories returned for plotting.

    Returns
    -------
    (mean_error, std_error * 100, trajectoryOrij, trajectory)
    """
    scaler_filename = scalers_path + "scaler_x.save"
    scaler_x = joblib.load(scaler_filename)
    trajectoryOrij = list()   # ground-truth trajectories
    trajectory = list()       # predicted trajectories, filled step by step
    val_len = 250             # size of the test slice
    if modelNumber == 3:
        series, _, _, _, indexes = get_data_for_model(modelNumber, features, FeatureSize)
        sample = np.zeros([val_len, seq_length, 6])
        sample2 = np.zeros([val_len, 128, 128, 1])
        # NOTE(review): sample3 is fed to predict but never written — it
        # stays all zeros. Confirm this matches how the model was trained.
        sample3 = np.zeros([val_len, seq_length, 10])
        max_traj_len = 0
        trajectoryIndexVal = indexes[1500:1750]
        for ii in range(val_len):
            i = trajectoryIndexVal[ii]
            imageAddres = 'images/' + str(i + 1) + '.jpg'
            trajectoryOrij.append(series[i])
            trajectory.append(np.zeros_like(series[i]))
            sample2[ii, :, :, :] = image.img_to_array(
                image.load_img(imageAddres, color_mode="grayscale").resize((128, 128)))
            if len(series[i]) > max_traj_len:
                max_traj_len = len(series[i])
            # Seed the whole window with the first state of the trajectory.
            for t in range(seq_length):
                sample[ii, t, :6] = scaler_x.transform(series[i][0:1, :])
        # Binarize the grayscale images (pixels > 127 become 1).
        sample2 = (sample2.astype('float32') / 255. / 0.5).astype('int32')
        for t in range(max_traj_len - 1):
            result = model.predict([sample2, sample, sample3], batch_size=250, verbose=0)
            for i in range(val_len):
                if t < len(trajectoryOrij[i]) - 1:
                    trajectory[i][t + 1, :] = scaler_x.inverse_transform(result[i:i + 1, :])
                    sample[i, :seq_length - 1, :] = sample[i, 1:seq_length, :]
                    sample[i, seq_length - 1:seq_length, :6] = result[i, :]
    else:
        scaler_filename = scalers_path + "scaler_f" + str(features) + "_fs" + str(FeatureSize) + ".save"
        scaler_features = joblib.load(scaler_filename)
        series, selected_features, _, _, indexes = get_data_for_model(modelNumber, features, FeatureSize)
        trajectoryIndexVal = indexes[1500:1750]
        max_traj_len = 0
        if modelNumber == 1:
            sample = np.zeros([val_len, seq_length, FeatureSize + 6])
        if modelNumber == 2:
            sample = np.zeros([val_len, seq_length, 6])
            sample2 = np.zeros([val_len, seq_length, FeatureSize])
        for ii in range(val_len):
            i = trajectoryIndexVal[ii]
            trajectoryOrij.append(series[i])
            trajectory.append(np.zeros_like(series[i]))
            if len(series[i]) > max_traj_len:
                max_traj_len = len(series[i])
            for t in range(seq_length):
                sample[ii, t, :6] = scaler_x.transform(series[i][0:1, :])
                if modelNumber == 1:
                    sample[ii, t, 6:] = scaler_features.transform(selected_features[i:i + 1, :])
                if modelNumber == 2:
                    sample2[ii, t, :] = scaler_features.transform(selected_features[i:i + 1, :])
        for t in range(max_traj_len - 1):
            # NOTE(review): unlike validate_model there is no [:, -1, :]
            # slice here, so `result` is used as returned by predict —
            # confirm the model output here really is 2-D.
            if modelNumber == 1:
                result = model.predict(sample, batch_size=250, verbose=0)
            if modelNumber == 2:
                # BUGFIX: was model.predict({sample, sample2}, ...) — a set
                # literal of numpy arrays, which raises TypeError (arrays are
                # unhashable) and has no defined order. Keras multi-input
                # predict takes a list; order (sample, sample2) assumed to
                # match the model inputs — TODO confirm against training code.
                result = model.predict([sample, sample2], batch_size=250, verbose=0)
            for i in range(val_len):
                if t < len(trajectoryOrij[i]) - 1:
                    trajectory[i][t + 1, :] = scaler_x.inverse_transform(result[i:i + 1, :])
                    sample[i, :seq_length - 1, :] = sample[i, 1:seq_length, :]
                    sample[i, seq_length - 1:seq_length, :6] = result[i, :]
    errorsxyz = list()
    for ii in range(val_len):
        # Euclidean distance between true and predicted final XYZ position.
        errorsxyz.append(math.sqrt(sum((trajectoryOrij[ii][-1, :3] - trajectory[ii][-1, :3]) ** 2)))
    print('Total XYZ MSE Error' + ' Mean=' + str(np.mean(errorsxyz) * 100)[:4] + ' Std=' + str(np.std(errorsxyz) * 100)[:4])
    return np.mean(errorsxyz), np.std(errorsxyz) * 100, trajectoryOrij, trajectory
| 41.338983
| 112
| 0.720787
| 1,154
| 7,317
| 4.407279
| 0.136915
| 0.056626
| 0.031459
| 0.028116
| 0.837004
| 0.834841
| 0.81164
| 0.81164
| 0.81164
| 0.81164
| 0
| 0.04177
| 0.116578
| 7,317
| 176
| 113
| 41.573864
| 0.74505
| 0.01722
| 0
| 0.733728
| 0
| 0
| 0.02881
| 0
| 0
| 0
| 0
| 0.005682
| 0
| 1
| 0.011834
| false
| 0
| 0.118343
| 0
| 0.142012
| 0.011834
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
41952a408bea3ce43802094cf484df4217a317e3
| 68,711
|
py
|
Python
|
BMSSlibmod/ConstitutivePromoterLibrary_.py
|
EngBioNUS/BMSSlib
|
d521362f4ee66b937ec1e46a0515ed4adefb83e8
|
[
"Apache-2.0"
] | 6
|
2019-05-02T20:42:52.000Z
|
2022-02-20T04:58:09.000Z
|
BMSSlibmod/ConstitutivePromoterLibrary_.py
|
EngBioNUS/BMSSlib
|
d521362f4ee66b937ec1e46a0515ed4adefb83e8
|
[
"Apache-2.0"
] | null | null | null |
BMSSlibmod/ConstitutivePromoterLibrary_.py
|
EngBioNUS/BMSSlib
|
d521362f4ee66b937ec1e46a0515ed4adefb83e8
|
[
"Apache-2.0"
] | 7
|
2020-02-03T02:57:17.000Z
|
2021-01-04T03:30:05.000Z
|
# -*- coding: utf-8 -*-
"""
Released on April 29, 2019
@author: Yeoh Jing Wui <bchyjw@nus.edu.sg>; Poh Chueh Loo <poh.chuehloo@nus.edu.sg>
The code is part of BMSS software.
Copyright (c) 2019, National University of Singapore.
"""
import numpy as np
import constrNMPy as cNM
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import differential_evolution
class ConstitutivePromoterLibrary:
### ODE Model for Constitutive Promoter ###
def solveODE_ConstDouble(y, t, param, TotalDataSet, Operation='Solve'):
    """Two-state (mRNA, Pep) constitutive-promoter model, single dataset.

    y: [mRNA, Pep]; param: [syn_mRNA, syn_Pep, deg_Pep].
    Operation 'Solve' returns the numeric derivatives (odeint signature);
    'GetODE' returns the right-hand sides as strings for reporting.
    """
    # Dependent variables
    mRNA = y[0]
    Pep = y[1]
    # Parameters
    syn_mRNA = param[0]
    deg_mRNA = 0.1386  # Fixed as ln2/5(mins) = 0.1386
    syn_Pep = param[1]
    deg_Pep = param[2]
    # Right-hand sides as strings, returned verbatim by 'GetODE'.
    dmRNA_dt = '(syn_mRNA)-(deg_mRNA * mRNA)'
    dPep_dt = '(syn_Pep*mRNA)-(deg_Pep*Pep)'
    if Operation == 'Solve':
        # Computed directly instead of eval() on the strings (same values,
        # no dynamic evaluation on every odeint call).
        return [syn_mRNA - deg_mRNA * mRNA, syn_Pep * mRNA - deg_Pep * Pep]
    elif Operation == 'GetODE':
        return [dmRNA_dt, dPep_dt]
    else:
        print('Error: Please Enter the correct Operation for solveODE function')
def solveODE_ConstDoubleKMat(y, t, param, TotalDataSet, Operation='Solve'):
    """Three-state (mRNA, Pep, mature Pep) constitutive model, one dataset.

    y: [mRNA, Pep, Pepm]; param: [syn_mRNA, syn_Pep, deg_Pep, Kmature].
    'Solve' returns numeric derivatives; 'GetODE' returns the RHS strings.
    """
    # Dependent variables
    mRNA = y[0]
    Pep = y[1]
    Pepm = y[2]
    # Parameters
    syn_mRNA = param[0]
    deg_mRNA = 0.1386  # Fixed as ln2/5(mins) = 0.1386
    syn_Pep = param[1]
    deg_Pep = param[2]
    Kmature = param[3]
    # Right-hand sides as strings, returned verbatim by 'GetODE'.
    dmRNA_dt = '(syn_mRNA)-(deg_mRNA * mRNA)'
    dPep_dt = '(syn_Pep*mRNA)-(Kmature*Pep)'
    dPepm_dt = '(Kmature*Pep)-(deg_Pep*Pepm)'
    if Operation == 'Solve':
        # Computed directly instead of eval() on the strings.
        return [syn_mRNA - deg_mRNA * mRNA,
                syn_Pep * mRNA - Kmature * Pep,
                Kmature * Pep - deg_Pep * Pepm]
    elif Operation == 'GetODE':
        return [dmRNA_dt, dPep_dt, dPepm_dt]
    else:
        print('Error: Please Enter the correct Operation for solveODE function')
### Single-ODE Model for Constitutive Promoter ###
def solveODE_ConstSingle(y, t, param, TotalDataSet, Operation='Solve'):
    """Single-state (Pep) constitutive model, one dataset.

    y: [Pep]; param: [syn_Pep, deg_Pep].
    'Solve' returns numeric derivatives; 'GetODE' returns the RHS string.
    """
    # Dependent variables
    Pep = y[0]
    # Parameters
    syn_Pep = param[0]
    deg_Pep = param[1]
    # Right-hand side as a string, returned verbatim by 'GetODE'.
    dPep_dt = '(syn_Pep)-(deg_Pep*Pep)'
    if Operation == 'Solve':
        # Computed directly instead of eval() on the string.
        return [syn_Pep - deg_Pep * Pep]
    elif Operation == 'GetODE':
        return [dPep_dt]
    else:
        print('Error: Please Enter the correct Operation for solveODE function')
def solveODE_ConstSingleKMat(y, t, param, TotalDataSet, Operation='Solve'):
    """Two-state (Pep, mature Pep) constitutive model, one dataset.

    y: [Pep, Pepm]; param: [syn_Pep, deg_Pep, Kmature].
    'Solve' returns numeric derivatives; 'GetODE' returns the RHS strings.
    """
    # Dependent variables
    Pep = y[0]
    Pepm = y[1]
    # Parameters
    syn_Pep = param[0]
    deg_Pep = param[1]
    Kmature = param[2]
    # Right-hand sides as strings, returned verbatim by 'GetODE'.
    dPep_dt = '(syn_Pep)-(Kmature*Pep)'
    dPepm_dt = '(Kmature*Pep)-(deg_Pep*Pepm)'
    if Operation == 'Solve':
        # Computed directly instead of eval() on the strings.
        return [syn_Pep - Kmature * Pep, Kmature * Pep - deg_Pep * Pepm]
    elif Operation == 'GetODE':
        return [dPep_dt, dPepm_dt]
    else:
        print('Error: Please Enter the correct Operation for solveODE function')
### Multi-ODE Model (different promoters same RBS) for Constitutive Promoter ###
def solveODE_MultiDoubleFixRBS(y, t, param, TotalDataSet, Operation='Solve'):
    """Two-state (mRNA, Pep) model for 2-6 datasets sharing one RBS.

    y: flat state [mRNA1, Pep1, mRNA2, Pep2, ...].
    param: [syn_Pep, deg_Pep, syn_mRNA1, ..., syn_mRNA6] (shared translation
    rates, per-dataset transcription rates).
    'Solve' returns numeric derivatives; 'GetODE' returns the RHS strings.
    """
    deg_mRNA = 0.1386  # Fixed as ln2/5(mins) = 0.1386
    syn_Pep = param[0]
    deg_Pep = param[1]
    # The original unrolled datasets 1-2 unconditionally and 3-6 behind
    # TotalDataSet checks; a loop over the same range is equivalent.
    num_sets = max(2, min(TotalDataSet, 6))
    dys = []  # numeric derivatives ('Solve')
    dy = []   # symbolic right-hand sides ('GetODE')
    for k in range(1, num_sets + 1):
        mRNA_k = y[2 * k - 2]
        Pep_k = y[2 * k - 1]
        syn_mRNA_k = param[k + 1]
        dys += [syn_mRNA_k - deg_mRNA * mRNA_k,
                syn_Pep * mRNA_k - deg_Pep * Pep_k]
        dy += ['(syn_mRNA%d)-(deg_mRNA*mRNA%d)' % (k, k),
               '(syn_Pep*mRNA%d)-(deg_Pep*Pep%d)' % (k, k)]
    if Operation == 'Solve':
        return dys
    elif Operation == 'GetODE':
        return dy
    else:
        print('Error: Please Enter the correct Operation for solveODE function')
def solveODE_MultiDoubleFixRBSKMat(y, t, param, TotalDataSet, Operation='Solve'):
    """Three-state (mRNA, Pep, mature Pep) model, 2-6 datasets, shared RBS.

    y: flat state [mRNA1, Pep1, Pepm1, mRNA2, ...].
    param: [syn_Pep, deg_Pep, syn_mRNA1, syn_mRNA2, Kmature, syn_mRNA3, ...]
    (note Kmature sits between the 2nd and 3rd transcription rates, matching
    the original parameter layout).
    'Solve' returns numeric derivatives; 'GetODE' returns the RHS strings.
    """
    deg_mRNA = 0.1386  # Fixed as ln2/5(mins) = 0.1386
    syn_Pep = param[0]
    deg_Pep = param[1]
    Kmature = param[4]
    # Original unrolled datasets 1-2 unconditionally, 3-6 behind checks.
    num_sets = max(2, min(TotalDataSet, 6))
    dys = []  # numeric derivatives ('Solve')
    dy = []   # symbolic right-hand sides ('GetODE')
    for k in range(1, num_sets + 1):
        mRNA_k = y[3 * k - 3]
        Pep_k = y[3 * k - 2]
        Pepm_k = y[3 * k - 1]
        # syn_mRNA1/2 precede Kmature in param; syn_mRNA3+ follow it.
        syn_mRNA_k = param[k + 1] if k <= 2 else param[k + 2]
        dys += [syn_mRNA_k - deg_mRNA * mRNA_k,
                syn_Pep * mRNA_k - Kmature * Pep_k,
                Kmature * Pep_k - deg_Pep * Pepm_k]
        dy += ['(syn_mRNA%d)-(deg_mRNA*mRNA%d)' % (k, k),
               '(syn_Pep*mRNA%d)-(Kmature*Pep%d)' % (k, k),
               '(Kmature*Pep%d)-(deg_Pep*Pepm%d)' % (k, k)]
    if Operation == 'Solve':
        return dys
    elif Operation == 'GetODE':
        return dy
    else:
        print('Error: Please Enter the correct Operation for solveODE function')
def solveODE_MultiSingleFixRBS(y, t, param, TotalDataSet, Operation='Solve'):
    """Single-state (Pep) model for 2-6 datasets sharing one RBS.

    y: flat state [Pep1, Pep2, ...].
    param: [deg_Pep, syn_Pep1, ..., syn_Pep6].
    'Solve' returns numeric derivatives; 'GetODE' returns the RHS strings.
    """
    deg_Pep = param[0]
    # Original unrolled datasets 1-2 unconditionally, 3-6 behind checks.
    num_sets = max(2, min(TotalDataSet, 6))
    dys = []  # numeric derivatives ('Solve')
    dy = []   # symbolic right-hand sides ('GetODE')
    for k in range(1, num_sets + 1):
        Pep_k = y[k - 1]
        syn_Pep_k = param[k]
        dys += [syn_Pep_k - deg_Pep * Pep_k]
        dy += ['(syn_Pep%d)-(deg_Pep*Pep%d)' % (k, k)]
    if Operation == 'Solve':
        return dys
    elif Operation == 'GetODE':
        return dy
    else:
        print('Error: Please Enter the correct Operation for solveODE function')
def solveODE_MultiSingleFixRBSKMat(y, t, param, TotalDataSet, Operation='Solve'):
    """Two-state (Pep, mature Pep) model, 2-6 datasets, shared RBS.

    y: flat state [Pep1, Pepm1, Pep2, Pepm2, ...].
    param: [deg_Pep, syn_Pep1, syn_Pep2, Kmature, syn_Pep3, ...] (Kmature
    sits between the 2nd and 3rd synthesis rates, matching the original).
    'Solve' returns numeric derivatives; 'GetODE' returns the RHS strings.
    """
    deg_Pep = param[0]
    Kmature = param[3]
    # Original unrolled datasets 1-2 unconditionally, 3-6 behind checks.
    num_sets = max(2, min(TotalDataSet, 6))
    dys = []  # numeric derivatives ('Solve')
    dy = []   # symbolic right-hand sides ('GetODE')
    for k in range(1, num_sets + 1):
        Pep_k = y[2 * k - 2]
        Pepm_k = y[2 * k - 1]
        # syn_Pep1/2 precede Kmature in param; syn_Pep3+ follow it.
        syn_Pep_k = param[k] if k <= 2 else param[k + 1]
        dys += [syn_Pep_k - Kmature * Pep_k,
                Kmature * Pep_k - deg_Pep * Pepm_k]
        dy += ['(syn_Pep%d)-(Kmature*Pep%d)' % (k, k),
               '(Kmature*Pep%d)-(deg_Pep*Pepm%d)' % (k, k)]
    if Operation == 'Solve':
        return dys
    elif Operation == 'GetODE':
        return dy
    else:
        print('Error: Please Enter the correct Operation for solveODE function')
### Multi-ODE Model (different RBSs same promoter) for Constitutive Promoter ###
def solveODE_MultiDoubleFixPromoter(y, t, param, TotalDataSet, Operation='Solve'):
    """Two-state (mRNA, Pep) model for 2-6 datasets sharing one promoter.

    y: flat state [mRNA1, Pep1, mRNA2, Pep2, ...].
    param: [syn_mRNA, deg_Pep, syn_Pep1, ..., syn_Pep6] (shared
    transcription rate, per-dataset translation rates).
    'Solve' returns numeric derivatives; 'GetODE' returns the RHS strings.
    """
    deg_mRNA = 0.1386  # Fixed as ln2/5(mins) = 0.1386
    syn_mRNA = param[0]
    deg_Pep = param[1]
    # Original unrolled datasets 1-2 unconditionally, 3-6 behind checks.
    num_sets = max(2, min(TotalDataSet, 6))
    dys = []  # numeric derivatives ('Solve')
    dy = []   # symbolic right-hand sides ('GetODE')
    for k in range(1, num_sets + 1):
        mRNA_k = y[2 * k - 2]
        Pep_k = y[2 * k - 1]
        syn_Pep_k = param[k + 1]
        dys += [syn_mRNA - deg_mRNA * mRNA_k,
                syn_Pep_k * mRNA_k - deg_Pep * Pep_k]
        dy += ['(syn_mRNA)-(deg_mRNA*mRNA%d)' % k,
               '(syn_Pep%d*mRNA%d)-(deg_Pep*Pep%d)' % (k, k, k)]
    if Operation == 'Solve':
        return dys
    elif Operation == 'GetODE':
        return dy
    else:
        print('Error: Please Enter the correct Operation for solveODE function')
def solveODE_MultiDoubleFixPromoterKMat(y, t, param, TotalDataSet, Operation='Solve'):
    """Three-state (mRNA, Pep, mature Pep) model, 2-6 datasets, shared promoter.

    y: flat state [mRNA1, Pep1, Pepm1, mRNA2, ...].
    param: [syn_mRNA, deg_Pep, syn_Pep1, syn_Pep2, Kmature, syn_Pep3, ...]
    (Kmature sits between the 2nd and 3rd translation rates, matching the
    original parameter layout).
    'Solve' returns numeric derivatives; 'GetODE' returns the RHS strings.
    """
    deg_mRNA = 0.1386  # Fixed as ln2/5(mins) = 0.1386
    syn_mRNA = param[0]
    deg_Pep = param[1]
    Kmature = param[4]
    # Original unrolled datasets 1-2 unconditionally, 3-6 behind checks.
    num_sets = max(2, min(TotalDataSet, 6))
    dys = []  # numeric derivatives ('Solve')
    dy = []   # symbolic right-hand sides ('GetODE')
    for k in range(1, num_sets + 1):
        mRNA_k = y[3 * k - 3]
        Pep_k = y[3 * k - 2]
        Pepm_k = y[3 * k - 1]
        # syn_Pep1/2 precede Kmature in param; syn_Pep3+ follow it.
        syn_Pep_k = param[k + 1] if k <= 2 else param[k + 2]
        dys += [syn_mRNA - deg_mRNA * mRNA_k,
                syn_Pep_k * mRNA_k - Kmature * Pep_k,
                Kmature * Pep_k - deg_Pep * Pepm_k]
        dy += ['(syn_mRNA)-(deg_mRNA*mRNA%d)' % k,
               '(syn_Pep%d*mRNA%d)-(Kmature*Pep%d)' % (k, k, k),
               '(Kmature*Pep%d)-(deg_Pep*Pepm%d)' % (k, k)]
    if Operation == 'Solve':
        return dys
    elif Operation == 'GetODE':
        return dy
    else:
        print('Error: Please Enter the correct Operation for solveODE function')
# Name -> solver lookup used by select_function to turn the 'solveODE_<SystemType>'
# string built in ComputeSSE into the actual ODE function passed to odeint.
function_mappings = {
    'solveODE_ConstSingle': solveODE_ConstSingle,
    'solveODE_ConstSingleKMat': solveODE_ConstSingleKMat,
    'solveODE_ConstDouble': solveODE_ConstDouble,
    'solveODE_ConstDoubleKMat': solveODE_ConstDoubleKMat,
    'solveODE_MultiDoubleFixRBS': solveODE_MultiDoubleFixRBS,
    'solveODE_MultiDoubleFixRBSKMat': solveODE_MultiDoubleFixRBSKMat,
    'solveODE_MultiSingleFixRBS': solveODE_MultiSingleFixRBS,
    'solveODE_MultiSingleFixRBSKMat': solveODE_MultiSingleFixRBSKMat,
    'solveODE_MultiDoubleFixPromoter': solveODE_MultiDoubleFixPromoter,
    'solveODE_MultiDoubleFixPromoterKMat': solveODE_MultiDoubleFixPromoterKMat,
}
def select_function(self, SystemTypeString):
### Convert SystemType from string to function name
try:
return self.function_mappings[SystemTypeString]
except KeyError:
print('Invalid function, try again.')
def ComputeSSE(self, param, y0, rfp_data, time_int, SystemType, VarIndex, OptimizerType, TotalDataSet):
    """Simulate the selected ODE system and return the sum of squared errors
    against the measured fluorescence data (objective for the optimizers).

    param        : parameter vector being optimized.
    y0           : initial conditions for the ODE system.
    rfp_data     : row 0 is the time axis; rows 1.. are one measured series
                   per dataset (presumably fluorescence readings — confirm).
    time_int     : sampling interval of rfp_data, in minutes.
    SystemType   : suffix of the solver name, e.g. 'ConstDouble'.
    VarIndex     : columns of the ODE solution to compare against the data.
    OptimizerType: 'Global' or 'Local' (affects only logging here).
    Returns the scalar SSE; also stores it in the module-global sse_global.
    """
    ### Calculate SSE - To be minimized by Optimizer ####
    global sse_global
    # Time grid == no. of times to report solution
    rfp_data_numrows = np.size(rfp_data, 0)
    rfp_data_numcols = np.size(rfp_data, 1)
    t_start = rfp_data[0][0]  # First value of Time column
    t_end = rfp_data[0][-1]  # Last value of Time column
    dt = 10  # minutes — simulation reporting step
    timestep = int((t_end / dt) + 1)
    t = np.linspace(t_start, t_end, timestep)
    # Initialize mRNA and Pep nested list
    #mRNA = np.zeros((timestep, numInd), dtype=object) # timestep (rows) x numInd (cols)
    Pep = np.zeros((timestep, TotalDataSet), dtype=object)  # timestep (rows) x numInd (cols)
    solveODE_Name = '_'.join(('solveODE', SystemType))
    solveODEfun = self.select_function(solveODE_Name)  # convert string to function name
    # NOTE(review): the 'Global' and 'Local' branches below are identical;
    # only the log message at the end differs.
    if OptimizerType == 'Global':
        # Integrate the ODE equations
        ODEsoln = odeint(solveODEfun, y0, t, args=(param, TotalDataSet))  # must have more than one additional arg
        #mRNA[j][i] = ODEsoln[j][VarIndex[0]] # mRNA array runs downwards with time
        for i in range(0, len(VarIndex)):
            for j in range(0, timestep):
                Pep[j][i] = ODEsoln[j][VarIndex[i]]  # Pep array runs downwards with time
    elif OptimizerType == 'Local':
        # Integrate the ODE equations
        ODEsoln = odeint(solveODEfun, y0, t, args=(param, TotalDataSet))
        for i in range(0, len(VarIndex)):
            for j in range(0, timestep):
                #mRNA[j][i] = ODEsoln[j][VarIndex[0]] # mRNA array runs downwards with time
                Pep[j][i] = ODEsoln[j][VarIndex[i]]  # Pep array runs downwards with time
    else:
        print('No specified Optimizer Type')
    '''
    Calculate SSE_Time
    '''
    # rfp_data runs lengthwise with time
    sse_time = 0
    for i in range(1, rfp_data_numrows):  # Start from 1 because Row 0 is time
        for j in range(0, rfp_data_numcols):
            # time_int * i to find the corresponding model Pep value:
            # int(time_int/dt)*j maps the j-th measurement onto the
            # simulation grid; dataset i-1 because row 0 is the time axis.
            sse_time = sse_time + (Pep[int(time_int/dt)*j, i-1] - rfp_data[i][j])**2
    sse = sse_time
    # update and return to main function
    sse_global = sse
    if OptimizerType == 'Global':
        print('Model: ', SystemType, '- SSE (Global):', sse)
    elif OptimizerType == 'Local':
        print('Model: ', SystemType, '- SSE (Local):', sse)
    else:
        print('Error in ComputeSSE function')
    return sse
def Run_ConstitutiveSystem(self, SystemType, data_header, data_array, TotalDataSet):
    """Set up and fit one constitutive gene-expression model to the data.

    Builds the initial state vector, the indices of the states that are
    compared against the measurements, and the parameter names/units/bounds
    for the requested model variant, then fits the parameters in two stages:
    a global differential-evolution search over ``param0_global`` followed by
    a constrained Nelder-Mead refinement between ``LB`` and ``UB`` seeded
    with the global optimum.

    Parameters
    ----------
    SystemType : str
        Model variant, e.g. 'ConstDouble', 'MultiSingleFixRBSKMat'.
    data_header : sequence
        Column headers of the measurement file (unused here; kept for
        interface compatibility with the callers).
    data_array : sequence
        Row 0 is the time axis; rows 1..TotalDataSet are the measured
        expression traces (column 0 of each row is the initial value).
    TotalDataSet : int
        Number of measured data sets; the Multi* variants support 2-6.

    Returns
    -------
    tuple
        (param_optimized, sse_global, y0, VariableName, ParamName, ParamUnits)

    Raises
    ------
    ValueError
        If ``SystemType`` is not a recognised model variant (the original
        code fell through and crashed with a NameError instead).
    """
    # Sampling interval of the measurements (assumed uniform).
    Time_interval = data_array[0][1] - data_array[0][0]

    def _extend_per_dataset(state_tpls, param_tpl, unit, bound, lb, ub):
        # Append one block of states plus one synthesis parameter for every
        # data set beyond the first two.  The original code enumerated the
        # 3rd..6th data sets by hand; keep its cap of six data sets.
        for k in range(3, min(TotalDataSet, 6) + 1):
            # Each new block starts at zero except the last (measured) state.
            y0.extend([0.] * (len(state_tpls) - 1) + [data_array[k][0]])
            new_states = [tpl.format(k) for tpl in state_tpls]
            VariableName.extend(new_states)
            # Only the last state of each block (the reporter) is fitted.
            VarIndex.append(VariableName.index(new_states[-1]))
            ParamName.append(param_tpl.format(k))
            ParamUnits.append(unit)
            param0_global.append(bound)
            LB.append(lb)
            UB.append(ub)

    ### ODE input: initial conditions, tracked states and parameter bounds ###
    if SystemType == 'ConstDouble':
        # States: transcript + reporter peptide; only Pep is fitted to data.
        y0 = [0., data_array[1][0]]
        VariableName = ['mRNA', 'Pep']
        VarIndex = [VariableName.index('Pep')]
        ParamName = ['syn_mRNA', 'syn_Pep', 'deg_Pep']
        ParamUnits = ['molL-1min-1', 'min-1', 'min-1']
        # Global bounds - in order of (a_mRNA, a_Pep, y_Pep)
        param0_global = [(5e-8, 5e-7), (0, 0.02), (0.001, 0.02)]
        # Local bounds
        LB = [1e-10, 1e-6, 0.001]
        UB = [5e-7, 0.02, 0.02]
    elif SystemType == 'ConstDoubleKMat':
        # As ConstDouble plus an explicit maturation step (Pep -> Pepm).
        y0 = [0., 0., data_array[1][0]]
        VariableName = ['mRNA', 'Pep', 'Pepm']
        VarIndex = [VariableName.index('Pepm')]
        ParamName = ['syn_mRNA', 'syn_Pep', 'deg_Pep', 'Kmature']
        ParamUnits = ['molL-1min-1', 'min-1', 'min-1', 'min-1']
        param0_global = [(5e-8, 5e-7), (0, 0.02), (0.001, 0.02), (0.001, 1)]
        LB = [1e-10, 1e-6, 0.001, 0.002]
        UB = [5e-7, 0.02, 0.02, 1]
    elif SystemType == 'ConstSingle':
        # Lumped single-step model: peptide only.
        y0 = [data_array[1][0]]
        VariableName = ['Pep']
        VarIndex = [VariableName.index('Pep')]
        ParamName = ['syn_Pep', 'deg_Pep']
        ParamUnits = ['molL-1min-1', 'min-1']
        param0_global = [(5e-8, 5e-7), (0.001, 0.02)]
        LB = [1e-10, 0.001]
        UB = [5e-7, 0.02]
    elif SystemType == 'ConstSingleKMat':
        y0 = [0., data_array[1][0]]
        VariableName = ['Pep', 'Pepm']
        VarIndex = [VariableName.index('Pepm')]
        ParamName = ['syn_Pep', 'deg_Pep', 'Kmature']
        ParamUnits = ['molL-1min-1', 'min-1', 'min-1']
        param0_global = [(5e-8, 5e-7), (0.001, 0.02), (0.001, 1)]
        LB = [1e-10, 0.001, 0.002]
        UB = [5e-7, 0.02, 1]
    elif SystemType == 'MultiDoubleFixRBS':
        # Shared syn_Pep/deg_Pep, one syn_mRNA per data set.
        y0 = [0., data_array[1][0], 0., data_array[2][0]]
        VariableName = ['mRNA1', 'Pep1', 'mRNA2', 'Pep2']
        VarIndex = [VariableName.index('Pep1'), VariableName.index('Pep2')]
        ParamName = ['syn_Pep', 'deg_Pep', 'syn_mRNA1', 'syn_mRNA2']
        ParamUnits = ['min-1', 'min-1', 'molL-1min-1', 'molL-1min-1']
        param0_global = [(0, 0.02), (0.001, 0.02), (5e-8, 5e-7), (5e-8, 5e-7)]
        LB = [1e-6, 0.001, 1e-10, 1e-10]
        UB = [0.02, 0.02, 5e-7, 5e-7]
        _extend_per_dataset(['mRNA{0}', 'Pep{0}'], 'syn_mRNA{0}',
                            'molL-1min-1', (5e-8, 5e-7), 1e-10, 5e-7)
    elif SystemType == 'MultiDoubleFixRBSKMat':
        y0 = [0., 0., data_array[1][0], 0., 0., data_array[2][0]]
        VariableName = ['mRNA1', 'Pep1', 'Pepm1', 'mRNA2', 'Pep2', 'Pepm2']
        VarIndex = [VariableName.index('Pepm1'), VariableName.index('Pepm2')]
        ParamName = ['syn_Pep', 'deg_Pep', 'syn_mRNA1', 'syn_mRNA2', 'Kmature']
        ParamUnits = ['min-1', 'min-1', 'molL-1min-1', 'molL-1min-1', 'min-1']
        param0_global = [(0, 0.02), (0.001, 0.02), (5e-8, 5e-7),
                         (5e-8, 5e-7), (0.001, 1)]
        LB = [1e-6, 0.001, 1e-10, 1e-10, 0.002]
        UB = [0.02, 0.02, 5e-7, 5e-7, 1]
        _extend_per_dataset(['mRNA{0}', 'Pep{0}', 'Pepm{0}'], 'syn_mRNA{0}',
                            'molL-1min-1', (5e-8, 5e-7), 1e-10, 5e-7)
    elif SystemType == 'MultiSingleFixRBS':
        # Shared deg_Pep, one syn_Pep per data set.
        y0 = [data_array[1][0], data_array[2][0]]
        VariableName = ['Pep1', 'Pep2']
        VarIndex = [VariableName.index('Pep1'), VariableName.index('Pep2')]
        ParamName = ['deg_Pep', 'syn_Pep1', 'syn_Pep2']
        ParamUnits = ['min-1', 'molL-1min-1', 'molL-1min-1']
        param0_global = [(0.001, 0.02), (5e-8, 5e-7), (5e-8, 5e-7)]
        LB = [0.001, 1e-10, 1e-10]
        UB = [0.02, 5e-7, 5e-7]
        _extend_per_dataset(['Pep{0}'], 'syn_Pep{0}',
                            'molL-1min-1', (5e-8, 5e-7), 1e-10, 5e-7)
    elif SystemType == 'MultiSingleFixRBSKMat':
        y0 = [0., data_array[1][0], 0., data_array[2][0]]
        VariableName = ['Pep1', 'Pepm1', 'Pep2', 'Pepm2']
        VarIndex = [VariableName.index('Pepm1'), VariableName.index('Pepm2')]
        ParamName = ['deg_Pep', 'syn_Pep1', 'syn_Pep2', 'Kmature']
        ParamUnits = ['min-1', 'molL-1min-1', 'molL-1min-1', 'min-1']
        param0_global = [(0.001, 0.02), (5e-8, 5e-7), (5e-8, 5e-7), (0.001, 1)]
        # NOTE(review): Kmature lower bound is 0.001 here but 0.002 in the
        # other KMat variants — preserved exactly as in the original code.
        LB = [0.001, 1e-10, 1e-10, 0.001]
        UB = [0.02, 5e-7, 5e-7, 1]
        _extend_per_dataset(['Pep{0}', 'Pepm{0}'], 'syn_Pep{0}',
                            'molL-1min-1', (5e-8, 5e-7), 1e-10, 5e-7)
    elif SystemType == 'MultiDoubleFixPromoter':
        # Shared syn_mRNA/deg_Pep, one syn_Pep per data set.
        y0 = [0., data_array[1][0], 0., data_array[2][0]]
        VariableName = ['mRNA1', 'Pep1', 'mRNA2', 'Pep2']
        VarIndex = [VariableName.index('Pep1'), VariableName.index('Pep2')]
        ParamName = ['syn_mRNA', 'deg_Pep', 'syn_Pep1', 'syn_Pep2']
        ParamUnits = ['molL-1min-1', 'min-1', 'min-1', 'min-1']
        param0_global = [(5e-8, 5e-7), (0.001, 0.02), (0, 0.02), (0, 0.02)]
        LB = [1e-10, 0.001, 1e-6, 1e-6]
        UB = [5e-7, 0.02, 0.02, 0.02]
        _extend_per_dataset(['mRNA{0}', 'Pep{0}'], 'syn_Pep{0}',
                            'min-1', (0, 0.02), 1e-6, 0.02)
    elif SystemType == 'MultiDoubleFixPromoterKMat':
        y0 = [0., 0., data_array[1][0], 0., 0., data_array[2][0]]
        VariableName = ['mRNA1', 'Pep1', 'Pepm1', 'mRNA2', 'Pep2', 'Pepm2']
        VarIndex = [VariableName.index('Pepm1'), VariableName.index('Pepm2')]
        ParamName = ['syn_mRNA', 'deg_Pep', 'syn_Pep1', 'syn_Pep2', 'Kmature']
        ParamUnits = ['molL-1min-1', 'min-1', 'min-1', 'min-1', 'min-1']
        param0_global = [(5e-8, 5e-7), (0.001, 0.02), (0, 0.02),
                         (0, 0.02), (0.001, 1)]
        LB = [1e-10, 0.001, 1e-6, 1e-6, 0.002]
        UB = [5e-7, 0.02, 0.02, 0.02, 1]
        _extend_per_dataset(['mRNA{0}', 'Pep{0}', 'Pepm{0}'], 'syn_Pep{0}',
                            'min-1', (0, 0.02), 1e-6, 0.02)
    else:
        # The original code had no else branch and crashed later with a
        # NameError on y0; fail fast with a descriptive error instead.
        raise ValueError('Unknown SystemType: ' + repr(SystemType))

    # One optimized parameter per name (the original tracked this by hand).
    numParam = len(ParamName)
    # Stage 1: global search with differential evolution.
    result_diffevo = differential_evolution(
        self.ComputeSSE, param0_global,
        args=(y0, data_array, Time_interval, SystemType, VarIndex,
              'Global', TotalDataSet))
    # Stage 2: constrained Nelder-Mead refinement seeded with the global optimum.
    param0_local = np.array(result_diffevo.x[:numParam], dtype=float)
    result_NM = cNM.constrNM(
        self.ComputeSSE, param0_local, LB, UB,
        args=(y0, data_array, Time_interval, SystemType, VarIndex,
              'Local', TotalDataSet),
        xtol=1e-15, full_output=True)
    param_optimized = np.array(result_NM['xopt'][:numParam], dtype=float)
    # NOTE(review): `sse_global` is never assigned in this method; the code
    # relies on ComputeSSE publishing the latest SSE through a module-level
    # variable of that name — confirm that a `global sse_global` declaration
    # exists in ComputeSSE, otherwise this raises NameError.
    return (param_optimized, sse_global, y0, VariableName, ParamName, ParamUnits)
# Plot CSV and Model Data #
def plotData_Combined(self, SystemType, Variable, y0, raw_data_header, rfp_data, Data_stddev, param, TotalDataSet):
    """Integrate the fitted model and plot it together with the CSV data.

    Produces up to three figures: (1) measured data with the model peptide
    curves overlaid, (2) the model peptide curves alone, and (3) the model
    mRNA curves when an mRNA state is simulated.

    Returns (t, VariableMatrix, rfp_data_legend, ODEstring).
    """
    ### Model time grid: one point every 10 minutes across the data span ###
    t_start = rfp_data[0][0]
    t_end = rfp_data[0][-1]
    dt = 10  # minutes
    timestep = int((t_end / dt) + 1)
    t = np.linspace(t_start, t_end, timestep)
    rfp_data_numrows = np.size(rfp_data, 0)
    rfp_data_numcols = np.size(rfp_data, 1)
    # One (timestep x TotalDataSet) result matrix per possible state kind.
    Variable_mappings = {
        name: np.zeros((timestep, TotalDataSet), dtype=object)
        for name in ('mRNA', 'Peptide', 'Peptidem')
    }
    # Keep only the matrices this model variant actually simulates.
    VariableMatrix = [Variable_mappings[name] for name in Variable]
    # Resolve the ODE right-hand side for this model variant by name.
    solveODEfun = self.select_function('_'.join(('solveODE', SystemType)))
    # Integrate the ODE equations.
    ODEsoln = odeint(solveODEfun, y0, t, args=(param, TotalDataSet))
    # Columns of ODEsoln are grouped per data set; the group width depends
    # on how many state kinds the model tracks.
    if len(VariableMatrix) == 1 and TotalDataSet > 1:
        stride = 1
    elif len(VariableMatrix) == 3:
        stride = 3
    else:
        stride = 2
    for j, matrix in enumerate(VariableMatrix):
        for i in range(TotalDataSet):
            matrix[:, i] = ODEsoln[:, j + stride * i]
    # Ask the RHS function for its human-readable ODE description.
    ODEstring = solveODEfun(y0, t[0], param, TotalDataSet, 'GetODE')
    # Measurement time axis is row 0 of the CSV data.
    time = rfp_data[0]

    def model_peptide():
        # Plot the matured peptide when the model has one, else the peptide.
        key = 'Peptide' if 'Peptidem' not in Variable else 'Peptidem'
        return VariableMatrix[Variable.index(key)]

    def stretch_ylim(margin):
        # Pin the y-axis to zero and add headroom above the current top.
        axes = plt.gca()
        _, ymax = axes.get_ylim()
        axes.set_ylim([0, ymax + margin * ymax])

    def trim_spines(frame):
        # Hide the top/right figure border.
        frame.spines['top'].set_visible(False)
        frame.spines['right'].set_visible(False)

    ### Figure 1: measured data with model overlay ###
    fig = plt.figure(figsize=(5, 3.6))
    ax = fig.add_axes([0.16, 0.16, 0.8, 0.78])
    plt.rc('font', size=16)  # controls default text sizes
    style = ['^', '*', '>', 'D', '<', 'd', 'p', 'o', 'h', '+', 's', 'x', 'v', '.', 'H']
    for i in range(1, rfp_data_numrows):
        plt.errorbar(time, rfp_data[i], yerr=Data_stddev[i - 1], capsize=2,
                     linestyle='None', marker=style[i - 1], markersize=4)
    rfp_data_legend = raw_data_header[0:rfp_data_numcols]
    plt.legend(rfp_data_legend, ncol=2, loc='upper left', prop={'size': 16}, frameon=False)
    stretch_ylim(0.25)
    # Restart the colour cycle so model curves reuse the data colours.
    plt.gca().set_prop_cycle(None)
    plt.plot(t, model_peptide()[:, :], linewidth=2)
    plt.xlabel('Time (min)')
    plt.ylabel('Expression Level (M/OD)')
    # Set Y axis ticker to scientific style.
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    trim_spines(ax)
    stretch_ylim(0.25)

    ### Figure 2: model peptide curves only ###
    fig = plt.figure(figsize=(5, 3.6))
    ax = fig.add_axes([0.16, 0.15, 0.8, 0.78])
    plt.rc('font', size=16)
    plt.plot(t, model_peptide()[:, :], linewidth=2)
    plt.xlabel('Time (min)')
    plt.ylabel('Expression Level (M/OD)')
    plt.legend(rfp_data_legend, ncol=2, loc='upper left', prop={'size': 16}, frameon=False)
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    trim_spines(ax)
    stretch_ylim(0.3)

    if 'mRNA' in Variable:
        ### Figure 3: model mRNA curves ###
        fig = plt.figure(figsize=(5, 3.6))
        ax = fig.add_axes([0.16, 0.15, 0.8, 0.78])
        plt.rc('font', size=16)
        plt.plot(t, VariableMatrix[Variable.index('mRNA')][:, :], linewidth=2)
        plt.xlabel('Time (min)')
        plt.ylabel('mRNA Concentration (M)')
        plt.legend(rfp_data_legend, ncol=2, loc='upper left', prop={'size': 16}, frameon=False)
        plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        trim_spines(ax)
        stretch_ylim(0.5)
    return t, VariableMatrix, rfp_data_legend, ODEstring
def Run_ConstitutivePlot(self, SystemType, y0, data_header, data_array, Data_stddev, param_optimized, TotalDataSet):
    """Pick the state variables to plot for ``SystemType`` and render them.

    Maps each model variant to the list of state kinds that
    ``plotData_Combined`` should draw, then delegates the actual
    integration and plotting to it.

    Returns
    -------
    tuple
        (t, VariableMatrix, rfp_data_legend, ODEstring) as produced by
        ``plotData_Combined``.

    Raises
    ------
    ValueError
        For an unknown ``SystemType`` (the original code printed
        'Error in Plotting module' and then crashed with a NameError).
    """
    # Dispatch table replaces the original if/elif chain of '==' / 'or' tests.
    variable_plots = {
        'ConstDouble': ['mRNA', 'Peptide'],
        'MultiDoubleFixRBS': ['mRNA', 'Peptide'],
        'MultiDoubleFixPromoter': ['mRNA', 'Peptide'],
        'ConstDoubleKMat': ['mRNA', 'Peptide', 'Peptidem'],
        'MultiDoubleFixPromoterKMat': ['mRNA', 'Peptide', 'Peptidem'],
        'MultiDoubleFixRBSKMat': ['mRNA', 'Peptide', 'Peptidem'],
        'ConstSingle': ['Peptide'],
        'MultiSingleFixRBS': ['Peptide'],
        'ConstSingleKMat': ['Peptide', 'Peptidem'],
        'MultiSingleFixRBSKMat': ['Peptide', 'Peptidem'],
    }
    try:
        VariablePlot = variable_plots[SystemType]
    except KeyError:
        raise ValueError('Error in Plotting module: unknown SystemType '
                         + repr(SystemType)) from None
    ### Calculate and plot model results (param_optimized) ###
    t, VariableMatrix, rfp_data_legend, ODEstring = self.plotData_Combined(
        SystemType, VariablePlot, y0, data_header, data_array, Data_stddev,
        param_optimized, TotalDataSet)
    return t, VariableMatrix, rfp_data_legend, ODEstring
def __del__(self):
    # Announce garbage collection of the instance, mainly for debugging.
    print(self.__class__.__name__, "destroyed")
| 41.84592
| 177
| 0.488757
| 7,391
| 68,711
| 4.408876
| 0.060344
| 0.016572
| 0.019763
| 0.026821
| 0.857024
| 0.835696
| 0.801602
| 0.779507
| 0.752593
| 0.728564
| 0
| 0.06283
| 0.402614
| 68,711
| 1,641
| 178
| 41.87142
| 0.73104
| 0.19339
| 0
| 0.715278
| 0
| 0
| 0.108555
| 0.051046
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.00496
| 0
| 0.046627
| 0.016865
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
41cdd6ca12a4c7b0ca9493369d0935677fcf7d0c
| 120
|
py
|
Python
|
ColabTurtlePlus/__init__.py
|
mathriddle/MyColabTurtle
|
214fe35d617689c65276045d2ff8a15bb5afbe0f
|
[
"MIT"
] | 1
|
2021-07-15T01:02:15.000Z
|
2021-07-15T01:02:15.000Z
|
ColabTurtlePlus/__init__.py
|
mathriddle/MyColabTurtle
|
214fe35d617689c65276045d2ff8a15bb5afbe0f
|
[
"MIT"
] | 1
|
2021-07-15T01:11:26.000Z
|
2021-09-24T01:06:45.000Z
|
ColabTurtlePlus/__init__.py
|
mathriddle/MyColabTurtle
|
214fe35d617689c65276045d2ff8a15bb5afbe0f
|
[
"MIT"
] | null | null | null |
print("Put clearscreen() as the first line in a cell (after the import command) to re-run turtle commands in the cell")
| 60
| 119
| 0.758333
| 22
| 120
| 4.136364
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 120
| 1
| 120
| 120
| 0.91
| 0
| 0
| 0
| 0
| 1
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 8
|
68e9b84efbabdb4c74d9cae72a6bcbbb3d6f2eca
| 64,247
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/bgp_global/bgp_global.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/bgp_global/bgp_global.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/bgp_global/bgp_global.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the junos_bgp_global module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class Bgp_globalArgs(object):  # pylint: disable=R0903
    """The arg spec for the junos_bgp_global module.

    Auto-generated by the resource module builder playbook; this class
    only namespaces the ``argument_spec`` dict consumed by AnsibleModule.
    Do not edit by hand — regenerate from the model instead.
    """

    def __init__(self, **kwargs):
        # No instance state: the class exists only to hold `argument_spec`.
        pass

    # Keys mirror the Junos `protocols bgp` configuration hierarchy.
    argument_spec = {
        "config": {
            "options": {
                "accept_remote_nexthop": {"type": "bool"},
                "add_path_display_ipv4_address": {"type": "bool"},
                "advertise_bgp_static": {
                    "options": {
                        "policy": {"type": "str"},
                        "set": {"type": "bool"},
                    },
                    "type": "dict",
                },
                "advertise_external": {
                    "options": {
                        "conditional": {"type": "bool"},
                        "set": {"type": "bool"},
                    },
                    "type": "dict",
                },
                "advertise_from_main_vpn_tables": {"type": "bool"},
                "advertise_inactive": {"type": "bool"},
                "advertise_peer_as": {"type": "bool"},
                "as_number": {"type": "str"},
                "asdot_notation": {"type": "bool"},
                "authentication_algorithm": {
                    "choices": ["aes-128-cmac-96", "hmac-sha-1-96", "md5"],
                    "type": "str",
                },
                "authentication_key": {"type": "str", "no_log": True},
                "authentication_key_chain": {"type": "str", "no_log": False},
                "bfd_liveness_detection": {
                    "options": {
                        "authentication": {
                            "options": {
                                "algorithm": {
                                    "choices": [
                                        "keyed-md5",
                                        "keyed-sha-1",
                                        "meticulous-keyed-md5",
                                        "meticulous-keyed-sha-1",
                                        "simple-password",
                                    ],
                                    "type": "str",
                                },
                                "key_chain": {"type": "str", "no_log": True},
                                "loose_check": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "detection_time": {
                            "options": {"threshold": {"type": "int"}},
                            "type": "dict",
                        },
                        "holddown_interval": {"type": "int"},
                        "minimum_interval": {"type": "int"},
                        "minimum_receive_interval": {"type": "int"},
                        "multiplier": {"type": "int"},
                        "no_adaptation": {"type": "bool"},
                        "session_mode": {
                            "choices": ["automatic", "multihop", "single-hop"],
                            "type": "str",
                        },
                        "transmit_interval": {
                            "options": {
                                "minimum_interval": {"type": "int"},
                                "threshold": {"type": "int"},
                            },
                            "type": "dict",
                        },
                        "version": {
                            "choices": ["0", "1", "automatic"],
                            "type": "str",
                        },
                    },
                    "type": "dict",
                },
                "bgp_error_tolerance": {
                    "options": {
                        "malformed_route_limit": {"type": "int"},
                        "malformed_update_log_interval": {"type": "int"},
                        "no_malformed_route_limit": {"type": "bool"},
                        "set": {"type": "bool"},
                    },
                    "type": "dict",
                },
                "bmp": {
                    "options": {
                        "monitor": {"type": "bool"},
                        "route_monitoring": {
                            "options": {
                                "none": {"type": "bool"},
                                "post_policy": {"type": "bool"},
                                "post_policy_exclude_non_eligible": {
                                    "type": "bool"
                                },
                                "post_policy_exclude_non_feasible": {
                                    "type": "bool"
                                },
                                "pre_policy": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                    },
                    "type": "dict",
                },
                "cluster_id": {"type": "str"},
                "damping": {"type": "bool"},
                "description": {"type": "str"},
                "disable": {"type": "bool"},
                "egress_te": {
                    "options": {
                        "backup_path": {"type": "str"},
                        "set": {"type": "bool"},
                    },
                    "type": "dict",
                },
                "egress_te_backup_paths": {
                    "options": {
                        "templates": {
                            "elements": "dict",
                            "options": {
                                "ip_forward": {
                                    "options": {
                                        "rti_name": {"type": "str"},
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "path_name": {"required": True, "type": "str"},
                                "peers": {"elements": "str", "type": "list"},
                                "remote_nexthop": {"type": "str"},
                            },
                            "type": "list",
                        }
                    },
                    "type": "dict",
                },
                "egress_te_set_segment": {
                    "elements": "dict",
                    "options": {
                        "egress_te_backup_segment_label": {"type": "int"},
                        "label": {"type": "int"},
                        "name": {"required": True, "type": "str"},
                    },
                    "type": "list",
                },
                "egress_te_sid_stats": {"type": "bool"},
                "enforce_first_as": {"type": "bool"},
                "export": {"type": "str"},
                "forwarding_context": {"type": "str"},
                "graceful_restart": {
                    "options": {
                        "disable": {"type": "bool"},
                        "dont_help_shared_fate_bfd_down": {"type": "bool"},
                        "forwarding_state_bit": {
                            "options": {
                                "as_rr_client": {"type": "bool"},
                                "from_fib": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "long_lived": {
                            "options": {
                                "advertise_to_non_llgr_neighbor": {
                                    "options": {
                                        "omit_no_export": {"type": "bool"},
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "receiver_disable": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "restart_time": {"type": "int"},
                        "set": {"type": "bool"},
                        "stale_routes_time": {"type": "int"},
                    },
                    "type": "dict",
                },
                # Peer groups: most global options repeat at group scope.
                "groups": {
                    "elements": "dict",
                    "options": {
                        "accept_remote_nexthop": {"type": "bool"},
                        "add_path_display_ipv4_address": {"type": "bool"},
                        "advertise_bgp_static": {
                            "options": {
                                "policy": {"type": "str"},
                                "set": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "advertise_external": {
                            "options": {
                                "conditional": {"type": "bool"},
                                "set": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "advertise_inactive": {"type": "bool"},
                        "advertise_peer_as": {"type": "bool"},
                        "allow": {"elements": "str", "type": "list"},
                        "as_override": {"type": "bool"},
                        "authentication_algorithm": {
                            "choices": [
                                "aes-128-cmac-96",
                                "hmac-sha-1-96",
                                "md5",
                            ],
                            "type": "str",
                        },
                        "authentication_key": {"type": "str", "no_log": True},
                        "authentication_key_chain": {
                            "type": "str",
                            "no_log": False,
                        },
                        "bfd_liveness_detection": {
                            "options": {
                                "authentication": {
                                    "options": {
                                        "algorithm": {
                                            "choices": [
                                                "keyed-md5",
                                                "keyed-sha-1",
                                                "meticulous-keyed-md5",
                                                "meticulous-keyed-sha-1",
                                                "simple-password",
                                            ],
                                            "type": "str",
                                        },
                                        "key_chain": {
                                            "type": "str",
                                            "no_log": True,
                                        },
                                        "loose_check": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "detection_time": {
                                    "options": {"threshold": {"type": "int"}},
                                    "type": "dict",
                                },
                                "holddown_interval": {"type": "int"},
                                "minimum_interval": {"type": "int"},
                                "minimum_receive_interval": {"type": "int"},
                                "multiplier": {"type": "int"},
                                "no_adaptation": {"type": "bool"},
                                "session_mode": {
                                    "choices": [
                                        "automatic",
                                        "multihop",
                                        "single-hop",
                                    ],
                                    "type": "str",
                                },
                                "transmit_interval": {
                                    "options": {
                                        "minimum_interval": {"type": "int"},
                                        "threshold": {"type": "int"},
                                    },
                                    "type": "dict",
                                },
                                "version": {
                                    "choices": ["0", "1", "automatic"],
                                    "type": "str",
                                },
                            },
                            "type": "dict",
                        },
                        "bgp_error_tolerance": {
                            "options": {
                                "malformed_route_limit": {"type": "int"},
                                "malformed_update_log_interval": {
                                    "type": "int"
                                },
                                "no_malformed_route_limit": {"type": "bool"},
                                "set": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "bmp": {
                            "options": {
                                "monitor": {"type": "bool"},
                                "route_monitoring": {
                                    "options": {
                                        "none": {"type": "bool"},
                                        "post_policy": {"type": "bool"},
                                        "post_policy_exclude_non_eligible": {
                                            "type": "bool"
                                        },
                                        "post_policy_exclude_non_feasible": {
                                            "type": "bool"
                                        },
                                        "pre_policy": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                            },
                            "type": "dict",
                        },
                        "cluster_id": {"type": "str"},
                        "damping": {"type": "bool"},
                        "description": {"type": "str"},
                        "egress_te": {
                            "options": {
                                "backup_path": {"type": "str"},
                                "set": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "enforce_first_as": {"type": "bool"},
                        "export": {"type": "str"},
                        "forwarding_context": {"type": "str"},
                        "graceful_restart": {
                            "options": {
                                "disable": {"type": "bool"},
                                "dont_help_shared_fate_bfd_down": {
                                    "type": "bool"
                                },
                                "forwarding_state_bit": {
                                    "options": {
                                        "as_rr_client": {"type": "bool"},
                                        "from_fib": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "long_lived": {
                                    "options": {
                                        "advertise_to_non_llgr_neighbor": {
                                            "options": {
                                                "omit_no_export": {
                                                    "type": "bool"
                                                },
                                                "set": {"type": "bool"},
                                            },
                                            "type": "dict",
                                        },
                                        "receiver_disable": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "restart_time": {"type": "int"},
                                "set": {"type": "bool"},
                                "stale_routes_time": {"type": "int"},
                            },
                            "type": "dict",
                        },
                        "hold_time": {"type": "int"},
                        "idle_after_switch_over": {
                            "options": {
                                "forever": {"type": "bool"},
                                "timeout": {"type": "int"},
                            },
                            "type": "dict",
                        },
                        "import": {"elements": "str", "type": "list"},
                        "include_mp_next_hop": {"type": "bool"},
                        "ipsec_sa": {"type": "str"},
                        "keep": {"choices": ["all", "none"], "type": "str"},
                        "local_address": {"type": "str"},
                        "local_as": {
                            "options": {
                                "alias": {"type": "bool"},
                                "as_num": {"required": True, "type": "str"},
                                "loops": {"type": "int"},
                                "no_prepend_global_as": {"type": "bool"},
                                "private": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "local_interface": {"type": "str"},
                        "local_preference": {"type": "str"},
                        "log_updown": {"type": "bool"},
                        "metric_out": {
                            "options": {
                                "igp": {
                                    "options": {
                                        "delay_med_update": {"type": "bool"},
                                        "metric_offset": {"type": "int"},
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "metric_value": {"type": "int"},
                                "minimum_igp": {
                                    "options": {
                                        "metric_offset": {"type": "int"},
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                            },
                            "type": "dict",
                        },
                        "mtu_discovery": {"type": "bool"},
                        "multihop": {
                            "options": {
                                "no_nexthop_change": {"type": "bool"},
                                "set": {"type": "bool"},
                                "ttl": {"type": "int"},
                            },
                            "type": "dict",
                        },
                        "multipath": {
                            "options": {
                                "disable": {"type": "bool"},
                                "multiple_as": {"type": "bool"},
                                "multiple_as_disable": {"type": "bool"},
                                "set": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "name": {"type": "str"},
                        # Per-neighbor options, nested inside a group.
                        "neighbors": {
                            "elements": "dict",
                            "options": {
                                "accept_remote_nexthop": {"type": "bool"},
                                "add_path_display_ipv4_address": {
                                    "type": "bool"
                                },
                                "advertise_bgp_static": {
                                    "options": {
                                        "policy": {"type": "str"},
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "advertise_external": {
                                    "options": {
                                        "conditional": {"type": "bool"},
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "advertise_inactive": {"type": "bool"},
                                "advertise_peer_as": {"type": "bool"},
                                "as_override": {"type": "bool"},
                                "authentication_algorithm": {
                                    "choices": [
                                        "aes-128-cmac-96",
                                        "hmac-sha-1-96",
                                        "md5",
                                    ],
                                    "type": "str",
                                },
                                "authentication_key": {
                                    "type": "str",
                                    "no_log": True,
                                },
                                "authentication_key_chain": {
                                    "type": "str",
                                    "no_log": False,
                                },
                                "bfd_liveness_detection": {
                                    "options": {
                                        "authentication": {
                                            "options": {
                                                "algorithm": {
                                                    "choices": [
                                                        "keyed-md5",
                                                        "keyed-sha-1",
                                                        "meticulous-keyed-md5",
                                                        "meticulous-keyed-sha-1",
                                                        "simple-password",
                                                    ],
                                                    "type": "str",
                                                },
                                                "key_chain": {
                                                    "type": "str",
                                                    "no_log": False,
                                                },
                                                "loose_check": {
                                                    "type": "bool"
                                                },
                                            },
                                            "type": "dict",
                                        },
                                        "detection_time": {
                                            "options": {
                                                "threshold": {"type": "int"}
                                            },
                                            "type": "dict",
                                        },
                                        "holddown_interval": {"type": "int"},
                                        "minimum_interval": {"type": "int"},
                                        "minimum_receive_interval": {
                                            "type": "int"
                                        },
                                        "multiplier": {"type": "int"},
                                        "no_adaptation": {"type": "bool"},
                                        "session_mode": {
                                            "choices": [
                                                "automatic",
                                                "multihop",
                                                "single-hop",
                                            ],
                                            "type": "str",
                                        },
                                        "transmit_interval": {
                                            "options": {
                                                "minimum_interval": {
                                                    "type": "int"
                                                },
                                                "threshold": {"type": "int"},
                                            },
                                            "type": "dict",
                                        },
                                        "version": {
                                            "choices": ["0", "1", "automatic"],
                                            "type": "str",
                                        },
                                    },
                                    "type": "dict",
                                },
                                "bgp_error_tolerance": {
                                    "options": {
                                        "malformed_route_limit": {
                                            "type": "int"
                                        },
                                        "malformed_update_log_interval": {
                                            "type": "int"
                                        },
                                        "no_malformed_route_limit": {
                                            "type": "bool"
                                        },
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "bmp": {
                                    "options": {
                                        "monitor": {"type": "bool"},
                                        "route_monitoring": {
                                            "options": {
                                                "none": {"type": "bool"},
                                                "post_policy": {
                                                    "type": "bool"
                                                },
                                                "post_policy_exclude_non_eligible": {
                                                    "type": "bool"
                                                },
                                                "post_policy_exclude_non_feasible": {
                                                    "type": "bool"
                                                },
                                                "pre_policy": {"type": "bool"},
                                            },
                                            "type": "dict",
                                        },
                                    },
                                    "type": "dict",
                                },
                                "cluster_id": {"type": "str"},
                                "damping": {"type": "bool"},
                                "description": {"type": "str"},
                                "egress_te": {
                                    "options": {
                                        "backup_path": {"type": "str"},
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "enforce_first_as": {"type": "bool"},
                                "export": {"type": "str"},
                                "forwarding_context": {"type": "str"},
                                "graceful_restart": {
                                    "options": {
                                        "disable": {"type": "bool"},
                                        "dont_help_shared_fate_bfd_down": {
                                            "type": "bool"
                                        },
                                        "forwarding_state_bit": {
                                            "options": {
                                                "as_rr_client": {
                                                    "type": "bool"
                                                },
                                                "from_fib": {"type": "bool"},
                                            },
                                            "type": "dict",
                                        },
                                        "long_lived": {
                                            "options": {
                                                "advertise_to_non_llgr_neighbor": {
                                                    "options": {
                                                        "omit_no_export": {
                                                            "type": "bool"
                                                        },
                                                        "set": {
                                                            "type": "bool"
                                                        },
                                                    },
                                                    "type": "dict",
                                                },
                                                "receiver_disable": {
                                                    "type": "bool"
                                                },
                                            },
                                            "type": "dict",
                                        },
                                        "restart_time": {"type": "int"},
                                        "set": {"type": "bool"},
                                        "stale_routes_time": {"type": "int"},
                                    },
                                    "type": "dict",
                                },
                                "hold_time": {"type": "int"},
                                "idle_after_switch_over": {
                                    "options": {
                                        "forever": {"type": "bool"},
                                        "timeout": {"type": "int"},
                                    },
                                    "type": "dict",
                                },
                                "import": {"elements": "str", "type": "list"},
                                "include_mp_next_hop": {"type": "bool"},
                                "ipsec_sa": {"type": "str"},
                                "keep": {
                                    "choices": ["all", "none"],
                                    "type": "str",
                                },
                                "local_address": {"type": "str"},
                                "local_as": {
                                    "options": {
                                        "alias": {"type": "bool"},
                                        "as_num": {
                                            "required": True,
                                            "type": "str",
                                        },
                                        "loops": {"type": "int"},
                                        "no_prepend_global_as": {
                                            "type": "bool"
                                        },
                                        "private": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "local_interface": {"type": "str"},
                                "local_preference": {"type": "str"},
                                "log_updown": {"type": "bool"},
                                "metric_out": {
                                    "options": {
                                        "igp": {
                                            "options": {
                                                "delay_med_update": {
                                                    "type": "bool"
                                                },
                                                "metric_offset": {
                                                    "type": "int"
                                                },
                                                "set": {"type": "bool"},
                                            },
                                            "type": "dict",
                                        },
                                        "metric_value": {"type": "int"},
                                        "minimum_igp": {
                                            "options": {
                                                "metric_offset": {
                                                    "type": "int"
                                                },
                                                "set": {"type": "bool"},
                                            },
                                            "type": "dict",
                                        },
                                    },
                                    "type": "dict",
                                },
                                "mtu_discovery": {"type": "bool"},
                                "multihop": {
                                    "options": {
                                        "no_nexthop_change": {"type": "bool"},
                                        "set": {"type": "bool"},
                                        "ttl": {"type": "int"},
                                    },
                                    "type": "dict",
                                },
                                "multipath": {
                                    "options": {
                                        "disable": {"type": "bool"},
                                        "multiple_as": {"type": "bool"},
                                        "multiple_as_disable": {
                                            "type": "bool"
                                        },
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "neighbor_address": {"type": "str"},
                                "no_advertise_peer_as": {"type": "bool"},
                                "no_aggregator_id": {"type": "bool"},
                                "no_client_reflect": {"type": "bool"},
                                "out_delay": {"type": "int"},
                                "outbound_route_filter": {
                                    "options": {
                                        "bgp_orf_cisco_mode": {"type": "bool"},
                                        "prefix_based": {
                                            "options": {
                                                "accept": {
                                                    "options": {
                                                        "inet": {
                                                            "type": "bool"
                                                        },
                                                        "inet6": {
                                                            "type": "bool"
                                                        },
                                                        "set": {
                                                            "type": "bool"
                                                        },
                                                    },
                                                    "type": "dict",
                                                },
                                                "set": {"type": "bool"},
                                            },
                                            "type": "dict",
                                        },
                                    },
                                    "type": "dict",
                                },
                                "passive": {"type": "bool"},
                                "peer_as": {"type": "str"},
                                "preference": {"type": "str"},
                                "remove_private": {
                                    "options": {
                                        "all": {"type": "bool"},
                                        "all_replace": {"type": "bool"},
                                        "all_replace_nearest": {
                                            "type": "bool"
                                        },
                                        "no_peer_loop_check": {"type": "bool"},
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "rfc6514_compliant_safi129": {"type": "bool"},
                                "route_server_client": {"type": "bool"},
                                "tcp_aggressive_transmission": {
                                    "type": "bool"
                                },
                                "tcp_mss": {"type": "int"},
                                "traceoptions": {
                                    "options": {
                                        "file": {
                                            "options": {
                                                "filename": {
                                                    "required": True,
                                                    "type": "str",
                                                },
                                                "files": {"type": "int"},
                                                "no_world_readable": {
                                                    "type": "bool"
                                                },
                                                "size": {"type": "int"},
                                                "world_readable": {
                                                    "type": "bool"
                                                },
                                            },
                                            "type": "dict",
                                        },
                                        "flag": {
                                            "elements": "dict",
                                            "options": {
                                                "detail": {"type": "bool"},
                                                "disable": {"type": "bool"},
                                                "filter": {
                                                    "options": {
                                                        "match_on_prefix": {
                                                            "type": "bool"
                                                        },
                                                        "policy": {
                                                            "type": "str"
                                                        },
                                                        "set": {
                                                            "type": "bool"
                                                        },
                                                    },
                                                    "type": "dict",
                                                },
                                                "name": {
                                                    "choices": [
                                                        "4byte-as",
                                                        "add-path",
                                                        "all",
                                                        "bfd",
                                                        "damping",
                                                        "egress-te",
                                                        "general",
                                                        "graceful-restart",
                                                        "keepalive",
                                                        "normal",
                                                        "nsr-synchronization",
                                                        "open",
                                                        "packets",
                                                        "policy",
                                                        "refresh",
                                                        "route",
                                                        "state",
                                                        "task",
                                                        "thread-io",
                                                        "thread-update-io",
                                                        "timer",
                                                        "update",
                                                    ],
                                                    "required": True,
                                                    "type": "str",
                                                },
                                                "receive": {"type": "bool"},
                                                "send": {"type": "bool"},
                                            },
                                            "type": "list",
                                        },
                                    },
                                    "type": "dict",
                                },
                                "ttl": {"type": "int"},
                                "unconfigured_peer_graceful_restart": {
                                    "type": "bool"
                                },
                                "vpn_apply_export": {"type": "bool"},
                            },
                            "type": "list",
                        },
                        "no_advertise_peer_as": {"type": "bool"},
                        "no_aggregator_id": {"type": "bool"},
                        "no_client_reflect": {"type": "bool"},
                        "optimal_route_reflection": {
                            "options": {
                                "igp_backup": {"type": "str"},
                                "igp_primary": {"type": "str"},
                            },
                            "type": "dict",
                        },
                        "out_delay": {"type": "int"},
                        "outbound_route_filter": {
                            "options": {
                                "bgp_orf_cisco_mode": {"type": "bool"},
                                "prefix_based": {
                                    "options": {
                                        "accept": {
                                            "options": {
                                                "inet": {"type": "bool"},
                                                "inet6": {"type": "bool"},
                                                "set": {"type": "bool"},
                                            },
                                            "type": "dict",
                                        },
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                            },
                            "type": "dict",
                        },
                        "passive": {"type": "bool"},
                        "peer_as": {"type": "str"},
                        "preference": {"type": "str"},
                        "remove_private": {
                            "options": {
                                "all": {"type": "bool"},
                                "all_replace": {"type": "bool"},
                                "all_replace_nearest": {"type": "bool"},
                                "no_peer_loop_check": {"type": "bool"},
                                "set": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "rfc6514_compliant_safi129": {"type": "bool"},
                        "route_server_client": {"type": "bool"},
                        "tcp_aggressive_transmission": {"type": "bool"},
                        "tcp_mss": {"type": "int"},
                        "traceoptions": {
                            "options": {
                                "file": {
                                    "options": {
                                        "filename": {
                                            "required": True,
                                            "type": "str",
                                        },
                                        "files": {"type": "int"},
                                        "no_world_readable": {"type": "bool"},
                                        "size": {"type": "int"},
                                        "world_readable": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "flag": {
                                    "elements": "dict",
                                    "options": {
                                        "detail": {"type": "bool"},
                                        "disable": {"type": "bool"},
                                        "filter": {
                                            "options": {
                                                "match_on_prefix": {
                                                    "type": "bool"
                                                },
                                                "policy": {"type": "str"},
                                                "set": {"type": "bool"},
                                            },
                                            "type": "dict",
                                        },
                                        "name": {
                                            "choices": [
                                                "4byte-as",
                                                "add-path",
                                                "all",
                                                "bfd",
                                                "damping",
                                                "egress-te",
                                                "general",
                                                "graceful-restart",
                                                "keepalive",
                                                "normal",
                                                "nsr-synchronization",
                                                "open",
                                                "packets",
                                                "policy",
                                                "refresh",
                                                "route",
                                                "state",
                                                "task",
                                                "thread-io",
                                                "thread-update-io",
                                                "timer",
                                                "update",
                                            ],
                                            "required": True,
                                            "type": "str",
                                        },
                                        "receive": {"type": "bool"},
                                        "send": {"type": "bool"},
                                    },
                                    "type": "list",
                                },
                            },
                            "type": "dict",
                        },
                        "ttl": {"type": "int"},
                        "type": {
                            "choices": ["external", "internal"],
                            "type": "str",
                        },
                        "unconfigured_peer_graceful_restart": {"type": "bool"},
                        "vpn_apply_export": {"type": "bool"},
                    },
                    "type": "list",
                },
                "hold_time": {"type": "int"},
                "holddown_all_stale_labels": {"type": "bool"},
                "idle_after_switch_over": {
                    "options": {
                        "forever": {"type": "bool"},
                        "timeout": {"type": "int"},
                    },
                    "type": "dict",
                },
                "import": {"elements": "str", "type": "list"},
                "include_mp_next_hop": {"type": "bool"},
                "ipsec_sa": {"type": "str"},
                "keep": {"choices": ["all", "none"], "type": "str"},
                "local_address": {"type": "str"},
                "local_as": {
                    "options": {
                        "alias": {"type": "bool"},
                        "as_num": {"required": True, "type": "str"},
                        "loops": {"type": "int"},
                        "no_prepend_global_as": {"type": "bool"},
                        "private": {"type": "bool"},
                    },
                    "type": "dict",
                },
                "local_interface": {"type": "str"},
                "local_preference": {"type": "str"},
                "log_updown": {"type": "bool"},
                "loops": {"type": "int"},
                "metric_out": {
                    "options": {
                        "igp": {
                            "options": {
                                "delay_med_update": {"type": "bool"},
                                "metric_offset": {"type": "int"},
                                "set": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "metric_value": {"type": "int"},
                        "minimum_igp": {
                            "options": {
                                "metric_offset": {"type": "int"},
                                "set": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                    },
                    "type": "dict",
                },
                "mtu_discovery": {"type": "bool"},
                "multihop": {
                    "options": {
                        "no_nexthop_change": {"type": "bool"},
                        "set": {"type": "bool"},
                        "ttl": {"type": "int"},
                    },
                    "type": "dict",
                },
                "multipath": {
                    "options": {
                        "disable": {"type": "bool"},
                        "multiple_as": {"type": "bool"},
                        "multiple_as_disable": {"type": "bool"},
                        "set": {"type": "bool"},
                    },
                    "type": "dict",
                },
                "multipath_build_priority": {
                    "choices": ["low", "medium"],
                    "type": "str",
                },
                "no_advertise_peer_as": {"type": "bool"},
                "no_aggregator_id": {"type": "bool"},
                "no_client_reflect": {"type": "bool"},
                "no_precision_timers": {"type": "bool"},
                "out_delay": {"type": "int"},
                "outbound_route_filter": {
                    "options": {
                        "bgp_orf_cisco_mode": {"type": "bool"},
                        "prefix_based": {
                            "options": {
                                "accept": {
                                    "options": {
                                        "inet": {"type": "bool"},
                                        "inet6": {"type": "bool"},
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "set": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                    },
                    "type": "dict",
                },
                "output_queue_priority": {
                    "options": {
                        "defaults": {
                            "options": {
                                "high": {
                                    "options": {
                                        "expedited": {"type": "bool"},
                                        "priority": {"type": "int"},
                                    },
                                    "type": "dict",
                                },
                                "low": {
                                    "options": {
                                        "expedited": {"type": "bool"},
                                        "priority": {"type": "int"},
                                    },
                                    "type": "dict",
                                },
                                "medium": {
                                    "options": {
                                        "expedited": {"type": "bool"},
                                        "priority": {"type": "int"},
                                    },
                                    "type": "dict",
                                },
                            },
                            "type": "dict",
                        },
                        "expedited_update_tokens": {
                            "type": "int",
                            "no_log": True,
                        },
                        "priority_update_tokens": {
                            "elements": "dict",
                            "no_log": True,
                            "options": {
                                "priority": {"required": True, "type": "int"},
                                "update_tokens": {
                                    "required": True,
                                    "type": "int",
                                    "no_log": True,
                                },
                            },
                            "type": "list",
                        },
                    },
                    "type": "dict",
                },
                "passive": {"type": "bool"},
                "path_selection": {
                    "options": {
                        "always_compare_med": {"type": "bool"},
                        "as_path_ignore": {"type": "bool"},
                        "cisco_non_deterministic": {"type": "bool"},
                        "external_router_id": {"type": "bool"},
                        "l2vpn_use_bgp_rules": {"type": "bool"},
                        "med_plus_igp": {
                            "options": {
                                "igp_multiplier": {"type": "int"},
                                "med_multiplier": {"type": "int"},
                                "set": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                    },
                    "type": "dict",
                },
                "peer_as": {"type": "str"},
                "precision_timers": {"type": "bool"},
                "preference": {"type": "str"},
                "remove_private": {
                    "options": {
                        "all": {"type": "bool"},
                        "all_replace": {"type": "bool"},
                        "all_replace_nearest": {"type": "bool"},
                        "no_peer_loop_check": {"type": "bool"},
                        "set": {"type": "bool"},
                    },
                    "type": "dict",
                },
                "rfc6514_compliant_safi129": {"type": "bool"},
                "route_server_client": {"type": "bool"},
                "send_addpath_optimization": {"type": "bool"},
                "snmp_options": {
                    "options": {
                        "backward_traps_only_from_established": {
                            "type": "bool"
                        },
                        "emit_inet_address_length_in_oid": {"type": "bool"},
                    },
                    "type": "dict",
                },
                "sr_preference_override": {"type": "str"},
                "stale_labels_holddown_period": {"type": "int"},
                "tcp_aggressive_transmission": {"type": "bool"},
                "tcp_mss": {"type": "int"},
                "traceoptions": {
                    "options": {
                        "file": {
                            "options": {
                                "filename": {"required": True, "type": "str"},
                                "files": {"type": "int"},
                                "no_world_readable": {"type": "bool"},
                                "size": {"type": "int"},
                                "world_readable": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "flag": {
                            "elements": "dict",
                            "options": {
                                "detail": {"type": "bool"},
                                "disable": {"type": "bool"},
                                "filter": {
                                    "options": {
                                        "match_on_prefix": {"type": "bool"},
                                        "policy": {"type": "str"},
                                        "set": {"type": "bool"},
                                    },
                                    "type": "dict",
                                },
                                "name": {
                                    "choices": [
                                        "4byte-as",
                                        "add-path",
                                        "all",
                                        "bfd",
                                        "damping",
                                        "egress-te",
                                        "general",
                                        "graceful-restart",
                                        "keepalive",
                                        "normal",
                                        "nsr-synchronization",
                                        "open",
                                        "packets",
                                        "policy",
                                        "refresh",
                                        "route",
                                        "state",
                                        "task",
                                        "thread-io",
                                        "thread-update-io",
                                        "timer",
                                        "update",
                                    ],
                                    "required": True,
                                    "type": "str",
                                },
                                "receive": {"type": "bool"},
                                "send": {"type": "bool"},
                            },
                            "type": "list",
                        },
                    },
                    "type": "dict",
                },
                "traffic_statistics_labeled_path": {
                    "options": {
                        "file": {
                            "options": {
                                "filename": {"type": "str"},
                                "files": {"type": "int"},
                                "no_world_readable": {"type": "bool"},
                                "size": {"type": "int"},
                                "world_readable": {"type": "bool"},
                            },
                            "type": "dict",
                        },
                        "interval": {"type": "int"},
                    },
                    "type": "dict",
                },
                "ttl": {"type": "int"},
                "unconfigured_peer_graceful_restart": {"type": "bool"},
                "vpn_apply_export": {"type": "bool"},
            },
            "type": "dict",
        },
        # Device config text used by `parsed` state.
        "running_config": {"type": "str"},
        # Resource-module state machine selector.
        "state": {
            "choices": [
                "purged",
                "merged",
                "replaced",
                "deleted",
                "gathered",
                "parsed",
                "rendered",
            ],
            "default": "merged",
            "type": "str",
        },
    } # pylint: disable=C0301
| 50.508648
| 85
| 0.205846
| 2,617
| 64,247
| 4.840275
| 0.134123
| 0.14968
| 0.06063
| 0.074524
| 0.856083
| 0.844004
| 0.843688
| 0.841241
| 0.841241
| 0.821742
| 0
| 0.004501
| 0.678397
| 64,247
| 1,271
| 86
| 50.548387
| 0.608557
| 0.009463
| 0
| 0.685784
| 0
| 0
| 0.198692
| 0.030203
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000808
| false
| 0.005654
| 0.003231
| 0
| 0.005654
| 0.000808
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b62c694622a9ea3e2eb9b75ebb3bb93a2038652
| 16,911
|
py
|
Python
|
tests/test_context.py
|
proofit404/userstories
|
aebfc088c8b7aab80a227e48e29be638411ffd86
|
[
"BSD-2-Clause"
] | 187
|
2018-06-13T09:13:32.000Z
|
2020-05-28T05:02:23.000Z
|
tests/test_context.py
|
supadrupa/stories
|
3f1de66eae1216888eb5a7d2951013b8bbb4da25
|
[
"BSD-2-Clause"
] | 426
|
2018-04-02T14:12:31.000Z
|
2021-12-14T05:13:45.000Z
|
tests/test_context.py
|
proofit404/userstories
|
aebfc088c8b7aab80a227e48e29be638411ffd86
|
[
"BSD-2-Clause"
] | 15
|
2018-11-03T09:03:38.000Z
|
2020-05-10T17:16:47.000Z
|
import decimal
import pytest
from helpers import make_collector
from stories.exceptions import ContextContractError
from stories.exceptions import FailureError
from stories.exceptions import FailureProtocolError
from stories.exceptions import MutationError
def test_context_private_fields(r, c):
    """Deny access to the private fields of the context object."""

    # `r` and `c` are pytest fixtures; `c` supplies story/step mixins,
    # `r` runs a story — presumably in call vs `.run` mode; verify fixtures.
    class T(c.Child, c.PrivateMethod):
        pass

    class J(c.Parent, c.NormalParentMethod):
        def __init__(self):
            # Substory dependency injection: attach the child story.
            self.x = T().x

    # Simple.
    assert r(T().x)() == {}
    assert r(T().x.run)().value == {}

    # Substory DI.
    assert r(J().a)() == {}
    assert r(J().a.run)().value == {}
def test_context_dir(r, c):
    """Show context variables in the `dir` output."""

    class T(c.ParamChild, c.DirMethod):
        pass

    class J(c.ParamParent, c.DirParentMethod):
        def __init__(self):
            # The inner child sets `foo`, so the parent's context gains it.
            class T(c.Child, c.NormalMethod):
                foo = 1

            self.x = T().x

    # Simple: `dir()` of the context matches a plain object with the
    # same attributes.
    class Ctx:
        bar = 2

    assert r(T().x)(bar=2) == dir(Ctx())
    assert r(T().x.run)(bar=2).value == dir(Ctx())

    # Substory DI.
    class Ctx:
        foo = 1
        bar = 2

    assert r(J().a)(bar=1) == dir(Ctx())
    assert r(J().a.run)(bar=1).value == dir(Ctx())
def test_deny_context_attribute_deletion(r, c):
    """We can't use attribute deletion with `Context` object."""

    class T(c.Child, c.DeleteMethod):
        pass

    class J(c.Parent, c.NormalParentMethod):
        def __init__(self):
            self.x = T().x

    # Expected MutationError message (exact match below).
    expected = """
Context object is immutable.
Variables can not be removed from Context.
""".strip()

    # Simple: both call styles raise the same MutationError.
    with pytest.raises(MutationError) as exc_info:
        r(T().x)()
    assert str(exc_info.value) == expected

    with pytest.raises(MutationError) as exc_info:
        r(T().x.run)()
    assert str(exc_info.value) == expected

    # Substory DI.
    with pytest.raises(MutationError) as exc_info:
        r(J().a)()
    assert str(exc_info.value) == expected

    with pytest.raises(MutationError) as exc_info:
        r(J().a.run)()
    assert str(exc_info.value) == expected
def test_deny_context_boolean_comparison(r, c):
    """Deny using the context object in boolean comparison."""

    class T(c.ParamChild, c.CompareMethod):
        pass

    class J(c.ParamParent, c.NormalParentMethod):
        def __init__(self):
            self.x = T().x

    # Expected MutationError message (exact match below).
    expected = """
Context object can not be used in boolean comparison.
Available variables: 'bar'
""".strip()

    # Simple.
    with pytest.raises(MutationError) as exc_info:
        r(T().x)(bar=1)
    assert str(exc_info.value) == expected

    with pytest.raises(MutationError) as exc_info:
        r(T().x.run)(bar=1)
    assert str(exc_info.value) == expected

    # Substory DI.
    with pytest.raises(MutationError) as exc_info:
        r(J().a)(bar=1)
    assert str(exc_info.value) == expected

    with pytest.raises(MutationError) as exc_info:
        r(J().a.run)(bar=1)
    assert str(exc_info.value) == expected
def test_context_proper_getattr_behavior(r, x):
    """Context repr reflects which branch actually set each variable."""

    # NOTE(review): these expected-output literals look whitespace-stripped
    # in this copy — confirm internal indentation against upstream.
    expected = """
Branch.show_content
age_lt_18
age_gte_18
load_content (returned: 'allowed')
Context:
age: 18 # Story argument
access_allowed: True # Set by Branch.age_gte_18
""".strip()

    # make_collector() returns a getter that captures the executed story.
    getter = make_collector()
    r(x.Branch().show_content)(age=18)
    result = repr(getter())
    assert result == expected

    expected = """
Branch.show_content
age_lt_18
age_gte_18
load_content (returned: 'denied')
Context:
age: 1 # Story argument
access_allowed: False # Set by Branch.age_lt_18
""".strip()

    getter = make_collector()
    r(x.Branch().show_content)(age=1)
    result = repr(getter())
    assert result == expected
def test_context_attribute_error(r, x):
    """Reading an undefined context variable raises a descriptive AttributeError."""
    message = """
'Context' object has no attribute x
AttributeAccessError.x
one
Context()
""".strip()
    with pytest.raises(AttributeError) as exc_info:
        r(x.AttributeAccessError().x)()
    assert str(exc_info.value) == message
def test_context_representation_with_failure(r, x):
    """Story repr marks the failed step and shows the context at failure."""

    expected = """
Simple.x
one
two (failed)
Context:
bar: 2 # Story argument
foo: 3 # Story argument
""".strip()

    # Calling the story directly raises FailureError...
    getter = make_collector()
    with pytest.raises(FailureError):
        r(x.Simple().x)(foo=3, bar=2)
    assert repr(getter()) == expected

    # ...while `.run` returns a result object instead of raising.
    getter = make_collector()
    r(x.Simple().x.run)(foo=3, bar=2)
    assert repr(getter()) == expected

    expected = """
SubstoryDI.y
start
before
x (Simple.x)
one
two (failed)
Context:
spam: 3 # Story argument
foo: 2 # Set by SubstoryDI.start
bar: 4 # Set by SubstoryDI.before
""".strip()

    # Substory DI: the failing child story is injected into the parent.
    getter = make_collector()
    with pytest.raises(FailureError):
        r(x.SubstoryDI(x.Simple().x).y)(spam=3)
    assert repr(getter()) == expected

    getter = make_collector()
    r(x.SubstoryDI(x.Simple().x).y.run)(spam=3)
    assert repr(getter()) == expected
def test_context_representation_with_failure_reason_list(r, f):
    """Story repr includes the list-protocol failure reason string."""

    class T(f.ChildWithList, f.StringMethod):
        pass

    # NOTE(review): `Q` appears unused in this block — possibly leftover
    # from a copied test; confirm before removing.
    class Q(f.ParentWithList, f.NormalParentMethod, T):
        pass

    class J(f.ParentWithList, f.NormalParentMethod):
        def __init__(self):
            self.x = T().x

    # Simple.
    expected = """
T.x
one (failed: 'foo')
Context()
""".strip()

    getter = make_collector()
    with pytest.raises(FailureError):
        r(T().x)()
    assert repr(getter()) == expected

    getter = make_collector()
    r(T().x.run)()
    assert repr(getter()) == expected

    # Substory DI.
    expected = """
J.a
before
x (T.x)
one (failed: 'foo')
Context()
""".strip()

    getter = make_collector()
    with pytest.raises(FailureError):
        r(J().a)()
    assert repr(getter()) == expected

    getter = make_collector()
    r(J().a.run)()
    assert repr(getter()) == expected
def test_context_representation_with_failure_reason_enum(r, f):
    """Story repr includes the enum-protocol failure reason member."""

    class T(f.ChildWithEnum, f.EnumMethod):
        pass

    class J(f.ParentWithEnum, f.NormalParentMethod):
        def __init__(self):
            self.x = T().x

    # Simple: reason rendered with the enum's default repr.
    expected = """
T.x
one (failed: <Errors.foo: 1>)
Context()
""".strip()

    getter = make_collector()
    with pytest.raises(FailureError):
        r(T().x)()
    assert repr(getter()) == expected

    getter = make_collector()
    r(T().x.run)()
    assert repr(getter()) == expected

    # Substory DI.
    expected = """
J.a
before
x (T.x)
one (failed: <Errors.foo: 1>)
Context()
""".strip()

    getter = make_collector()
    with pytest.raises(FailureError):
        r(J().a)()
    assert repr(getter()) == expected

    getter = make_collector()
    r(J().a.run)()
    assert repr(getter()) == expected
def test_context_representation_with_result(r, x):
    """Story repr shows the returned value and variables set along the way."""

    expected = """
Simple.x
one
two
three (returned: -1)
Context:
bar: 3 # Story argument
foo: 1 # Story argument
baz: 4 # Set by Simple.two
""".strip()

    getter = make_collector()
    r(x.Simple().x)(foo=1, bar=3)
    assert repr(getter()) == expected

    getter = make_collector()
    r(x.Simple().x.run)(foo=1, bar=3)
    assert repr(getter()) == expected

    # Substory DI: child steps nest under the parent's `x` step.
    expected = """
SubstoryDI.y
start
before
x (Simple.x)
one
two
three (returned: -1)
Context:
spam: 2 # Story argument
foo: 1 # Set by SubstoryDI.start
bar: 3 # Set by SubstoryDI.before
baz: 4 # Set by Simple.two
""".strip()

    getter = make_collector()
    r(x.SubstoryDI(x.Simple().x).y)(spam=2)
    assert repr(getter()) == expected

    getter = make_collector()
    r(x.SubstoryDI(x.Simple().x).y.run)(spam=2)
    assert repr(getter()) == expected

    # Injecting a different child (Pipe) continues past the substory
    # and returns from the parent's `after` step instead.
    expected = """
SubstoryDI.y
start
before
x (Pipe.x)
one
two
three
after (returned: 6)
Context:
spam: 3 # Story argument
foo: 2 # Set by SubstoryDI.start
bar: 4 # Set by SubstoryDI.before
""".strip()

    getter = make_collector()
    r(x.SubstoryDI(x.Pipe().x).y)(spam=3)
    assert repr(getter()) == expected

    getter = make_collector()
    r(x.SubstoryDI(x.Pipe().x).y.run)(spam=3)
    assert repr(getter()) == expected
def test_context_representation_with_next(r, x):
    """Story repr marks steps skipped by the Skip marker."""

    expected = """
Simple.x
one
two (skipped)
Context:
bar: -1 # Story argument
foo: 1 # Story argument
""".strip()

    getter = make_collector()
    r(x.Simple().x)(foo=1, bar=-1)
    assert repr(getter()) == expected

    getter = make_collector()
    r(x.Simple().x.run)(foo=1, bar=-1)
    assert repr(getter()) == expected

    # Substory DI: skipping inside the child resumes at the parent's
    # `after` step.
    expected = """
SubstoryDI.y
start
before
x (Simple.x)
one
two (skipped)
after (returned: -4)
Context:
spam: -2 # Story argument
foo: -3 # Set by SubstoryDI.start
bar: -1 # Set by SubstoryDI.before
""".strip()

    getter = make_collector()
    r(x.SubstoryDI(x.Simple().x).y)(spam=-2)
    assert repr(getter()) == expected

    getter = make_collector()
    r(x.SubstoryDI(x.Simple().x).y.run)(spam=-2)
    assert repr(getter()) == expected
def test_context_representation_with_error(r, x):
    """Story repr marks the step that raised and shows an empty context."""
    expected = """
StepError.x
one (errored: ExpectedException)
Context()
""".strip()
    # Both invocation styles propagate the exception and leave the same trace.
    for story in (x.StepError().x, x.StepError().x.run):
        collect = make_collector()
        with pytest.raises(x.ExpectedException):
            r(story)()
        assert repr(collect()) == expected
def test_context_representation_with_failure_protocol_error(r, f):
    """Story repr marks a step that violated the failure protocol."""

    expected = """
T.x
one (errored: FailureProtocolError)
Context()
""".strip()

    # WrongMethod fails with a reason outside the declared protocol.
    class T(f.ChildWithList, f.WrongMethod):
        pass

    getter = make_collector()
    with pytest.raises(FailureProtocolError):
        r(T().x)()
    assert repr(getter()) == expected

    getter = make_collector()
    with pytest.raises(FailureProtocolError):
        r(T().x.run)()
    assert repr(getter()) == expected
def test_context_representation_with_context_contract_error(r, m):
    # A context-contract violation marks the offending step with
    # "(errored: ContextContractError)" while still showing the context
    # state accumulated before the error.
    class T(m.ParamChildWithNull, m.StringMethod):
        pass
    class J(m.ParamParentWithNull, m.StringParentMethod):
        def __init__(self):
            # Inject T's story as the substory dependency.
            self.x = T().x
    # Simple.
    expected = """
T.x
one (errored: ContextContractError)
Context:
bar: 2 # Story argument
foo: 1 # Story argument
""".strip()
    getter = make_collector()
    with pytest.raises(ContextContractError):
        r(T().x)(foo=1, bar=2)
    assert repr(getter()) == expected
    getter = make_collector()
    with pytest.raises(ContextContractError):
        r(T().x.run)(foo=1, bar=2)
    assert repr(getter()) == expected
    # Substory DI.
    # The parent's `before` step sets string-typed values which then
    # violate the child's contract inside the substory.
    expected = """
J.a
before
x (T.x)
one (errored: ContextContractError)
Context:
eggs: 2 # Story argument
ham: 1 # Story argument
foo: '1' # Set by J.before
bar: ['2'] # Set by J.before
""".strip()
    getter = make_collector()
    with pytest.raises(ContextContractError):
        r(J().a)(ham=1, eggs=2)
    assert repr(getter()) == expected
    getter = make_collector()
    with pytest.raises(ContextContractError):
        r(J().a.run)(ham=1, eggs=2)
    assert repr(getter()) == expected
def test_context_representation_with_missing_variables(r, m):
    # When required story arguments are absent, the error is attached to
    # the story itself (no step ever ran), so the trace shows the story
    # line with "(errored: ContextContractError)" and an empty context.
    class T(m.ParamChildWithNull, m.NormalMethod):
        pass
    class J(m.ParentWithNull, m.NormalParentMethod):
        def __init__(self):
            # Inject T's story as the substory dependency.
            self.x = T().x
    # Simple.
    expected = """
T.x (errored: ContextContractError)
Context()
""".strip()
    getter = make_collector()
    with pytest.raises(ContextContractError):
        r(T().x)()
    assert repr(getter()) == expected
    getter = make_collector()
    with pytest.raises(ContextContractError):
        r(T().x.run)()
    assert repr(getter()) == expected
    # Substory DI.
    # Here the error is attached to the substory entry inside the parent.
    expected = """
J.a
before
x (T.x) (errored: ContextContractError)
Context()
""".strip()
    getter = make_collector()
    with pytest.raises(ContextContractError):
        r(J().a)()
    assert repr(getter()) == expected
    getter = make_collector()
    with pytest.raises(ContextContractError):
        r(J().a.run)()
    assert repr(getter()) == expected
def test_context_representation_arguments_order(r, x):
    # Story arguments are listed alphabetically (bar before foo) regardless
    # of the keyword order used at the call site; variables set by steps
    # follow afterwards.
    # Simple.
    expected = """
Simple.x
one
two
three (returned: -1)
Context:
bar: 3 # Story argument
foo: 1 # Story argument
baz: 4 # Set by Simple.two
""".strip()
    getter = make_collector()
    # Deliberately pass keywords in non-alphabetical order.
    r(x.Simple().x)(bar=3, foo=1)
    assert repr(getter()) == expected
    getter = make_collector()
    r(x.Simple().x.run)(bar=3, foo=1)
    assert repr(getter()) == expected
    # FIXME: Substory DI.
def test_context_representation_long_variable(r, c):
    # A value whose repr is too long for one line is wrapped onto its own
    # line below the "variable: # origin" header.
    class T(c.ParamChild, c.NormalMethod):
        # 23-element list: long enough to force the wrapped layout.
        foo = list(range(23))
    class J(c.ParamParent, c.NormalParentMethod):
        def __init__(self):
            # Inject T's story as the substory dependency.
            self.x = T().x
    # Simple.
    expected = """
T.x
one
Context:
bar: 'baz' # Story argument
foo: # Set by T.one
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
""".strip()
    getter = make_collector()
    r(T().x)(bar="baz")
    assert repr(getter()) == expected
    getter = make_collector()
    r(T().x.run)(bar="baz")
    assert repr(getter()) == expected
    # Substory DI.
    expected = """
J.a
before
x (T.x)
one
after
Context:
bar: 'baz' # Story argument
foo: # Set by T.one
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
""".strip()
    getter = make_collector()
    r(J().a)(bar="baz")
    assert repr(getter()) == expected
    getter = make_collector()
    r(J().a.run)(bar="baz")
    assert repr(getter()) == expected
def test_context_representation_multiline_variable(r, c):
    # A value whose repr itself spans several lines is reproduced verbatim
    # below the "variable: # origin" header.
    class userlist(list):
        def __repr__(self):
            # Force a multi-line repr by joining tokens with newlines.
            return "\n ".join(super().__repr__().split())
    class T(c.ParamChild, c.NormalMethod):
        foo = userlist(range(3))
    class J(c.ParamParent, c.NormalParentMethod):
        def __init__(self):
            # Inject T's story as the substory dependency.
            self.x = T().x
    # Simple.
    expected = """
T.x
one
Context:
bar: 'baz' # Story argument
foo: # Set by T.one
[0,
1,
2]
""".strip()
    getter = make_collector()
    r(T().x)(bar="baz")
    assert repr(getter()) == expected
    getter = make_collector()
    r(T().x.run)(bar="baz")
    assert repr(getter()) == expected
    # Substory DI.
    expected = """
J.a
before
x (T.x)
one
after
Context:
bar: 'baz' # Story argument
foo: # Set by T.one
[0,
1,
2]
""".strip()
    getter = make_collector()
    r(J().a)(bar="baz")
    assert repr(getter()) == expected
    getter = make_collector()
    r(J().a.run)(bar="baz")
    assert repr(getter()) == expected
def test_context_representation_variable_aliases(r, c):
    # When a step stores the exact same object under a second name, the
    # representation shows "`other` alias" instead of repeating the value.
    class T(c.ParamChild, c.NormalMethod):
        foo = "baz"
    class J(c.ParamParent, c.NormalParentMethod):
        def __init__(self):
            # Inject T's story as the substory dependency.
            self.x = T().x
    # Simple.
    expected = """
T.x
one
Context:
bar: 'baz' # Story argument
foo: `bar` alias # Set by T.one
""".strip()
    getter = make_collector()
    # Pass the same string object as the argument so foo aliases bar.
    r(T().x)(bar=T.foo)
    assert repr(getter()) == expected
    getter = make_collector()
    r(T().x.run)(bar=T.foo)
    assert repr(getter()) == expected
    # Substory DI.
    expected = """
J.a
before
x (T.x)
one
after
Context:
bar: 'baz' # Story argument
foo: `bar` alias # Set by T.one
""".strip()
    getter = make_collector()
    r(J().a)(bar=T.foo)
    assert repr(getter()) == expected
    getter = make_collector()
    r(J().a.run)(bar=T.foo)
    assert repr(getter()) == expected
@pytest.mark.parametrize("arg", [None, True, 1, 1.0, decimal.Decimal("1.0")])
def test_context_representation_variable_aliases_ignore(r, c, arg):
    # Interned/singleton scalar values (None, bools, small numbers) that
    # happen to be equal across two variables must NOT be reported as
    # aliases — each variable shows its own repr.
    class T(c.ParamChild, c.NormalMethod):
        foo = arg
    class J(c.ParamParent, c.NormalParentMethod):
        def __init__(self):
            # Inject T's story as the substory dependency.
            self.x = T().x
    shown = repr(arg)
    # Simple.
    expected = """
T.x
one
Context:
bar: {arg} # Story argument
foo: {arg} # Set by T.one
""".strip().format(arg=shown)
    getter = make_collector()
    r(T().x)(bar=T.foo)
    assert repr(getter()) == expected
    getter = make_collector()
    r(T().x.run)(bar=T.foo)
    assert repr(getter()) == expected
    # Substory DI.
    expected = """
J.a
before
x (T.x)
one
after
Context:
bar: {arg} # Story argument
foo: {arg} # Set by T.one
""".strip().format(arg=shown)
    getter = make_collector()
    r(J().a)(bar=T.foo)
    assert repr(getter()) == expected
    getter = make_collector()
    r(J().a.run)(bar=T.foo)
    assert repr(getter()) == expected
| 20.350181
| 86
| 0.597658
| 2,220
| 16,911
| 4.447748
| 0.087838
| 0.072412
| 0.103909
| 0.126393
| 0.815475
| 0.790257
| 0.751671
| 0.722402
| 0.704578
| 0.691817
| 0
| 0.014497
| 0.249483
| 16,911
| 830
| 87
| 20.374699
| 0.763473
| 0.025782
| 0
| 0.785592
| 0
| 0.003431
| 0.233662
| 0.007728
| 0
| 0
| 0
| 0.001205
| 0.121784
| 1
| 0.056604
| false
| 0.017153
| 0.012007
| 0.001715
| 0.133791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b6571c228557adef4f40addf60916256251c915
| 15,773
|
py
|
Python
|
tests/test_filters.py
|
redhat-cip/dci-downloader
|
d7ad184f502e145b9ccf2618b8af6d82ac7daa8a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_filters.py
|
redhat-cip/dci-downloader
|
d7ad184f502e145b9ccf2618b8af6d82ac7daa8a
|
[
"Apache-2.0"
] | 3
|
2019-10-30T20:39:06.000Z
|
2021-03-31T07:55:00.000Z
|
tests/test_filters.py
|
redhat-cip/dci-downloader
|
d7ad184f502e145b9ccf2618b8af6d82ac7daa8a
|
[
"Apache-2.0"
] | 2
|
2020-01-12T05:27:00.000Z
|
2020-06-19T02:39:24.000Z
|
from dci_downloader.filters import filter_files_list
from dci_downloader.settings import get_settings
def test_default_filter_files_list():
    """Default settings keep top-level metadata plus x86_64 os/ content.

    Debug trees, ISOs, and foreign-arch packages are filtered out.
    """
    # Shared file entries — referenced from both the input listing and the
    # expected output so each record is written once.
    composeinfo = {
        "path": "",
        "sha256": "954719cab91afac5bc142656afff86e6d8e87570b035cbce65dbbb84892a40d3",
        "name": ".composeinfo",
        "size": 14496,
    }
    appstream_debug_rpm = {
        "path": "AppStream/x86_64/debug/tree/Packages",
        "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
        "name": "PackageKit-command-not-found-debuginfo-1.1.12-2.el8.x86_64.rpm",
        "size": 45052,
    }
    appstream_rpm = {
        "path": "AppStream/x86_64/os/Packages",
        "sha256": "8fe293470f677bfc6eb04204c47b5e1a0e5d15431ef7ed9dbb269aaea386ed9f",
        "name": "PackageKit-command-not-found-1.1.12-2.el8.x86_64.rpm",
        "size": 28616,
    }
    baseos_rpm = {
        "path": "BaseOS/x86_64/os/Packages",
        "sha256": "7949b18b6d359b435686f2f5781928675ec8b2872b96f0abf6ba10747f794694",
        "name": "avahi-libs-0.7-19.el8.i686.rpm",
        "size": 68920,
    }
    baseos_iso = {
        "path": "BaseOS/x86_64/iso",
        "sha256": "06fd27c0279d5b42078f7de66d056c7875d025d1eb89a29dd2777240459c1026",
        "name": "RHEL-8.4.0-20201020.n.2-BaseOS-x86_64-boot.iso",
        "size": 731906048,
    }
    s390x_rpm = {
        "path": "AppStream/s390x/os/Packages",
        "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
        "name": "PackageKit-command-not-found-debuginfo-1.1.12-2.el8.s390x.rpm",
        "size": 29562,
    }
    treeinfo = {
        "path": "AppStream/x86_64/os",
        "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
        "name": ".treeinfo",
        "size": 29562,
    }
    dci_files_list = {
        "directories": [],
        "files": [
            composeinfo,
            appstream_debug_rpm,
            appstream_rpm,
            baseos_rpm,
            baseos_iso,
            s390x_rpm,
            treeinfo,
        ],
        "symlinks": [],
    }
    settings = get_settings(sys_args=["RHEL-8", "/tmp"])["topics"][0]
    expected_files_list = {
        "directories": [],
        "files": [composeinfo, appstream_rpm, baseos_rpm, treeinfo],
        "symlinks": [],
    }
    assert filter_files_list(dci_files_list, settings) == expected_files_list
def test_filter_files_list_with_debug():
    """With --debug, debug-tree packages survive filtering alongside os/ ones."""
    # Both entries are expected to pass the filter unchanged, so the same
    # records are reused for input and expectation.
    debug_rpm = {
        "path": "AppStream/x86_64/debug/tree/Packages",
        "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
        "name": "PackageKit-command-not-found-debuginfo-1.1.12-2.el8.x86_64.rpm",
        "size": 45052,
    }
    os_rpm = {
        "path": "AppStream/x86_64/os/Packages",
        "sha256": "8fe293470f677bfc6eb04204c47b5e1a0e5d15431ef7ed9dbb269aaea386ed9f",
        "name": "PackageKit-command-not-found-1.1.12-2.el8.x86_64.rpm",
        "size": 28616,
    }
    dci_files_list = {
        "directories": [],
        "files": [debug_rpm, os_rpm],
        "symlinks": [],
    }
    settings = get_settings(
        sys_args=["RHEL-8", "/tmp", "--variant", "AppStream", "--debug"]
    )["topics"][0]
    expected_files_list = {
        "directories": [],
        "files": [debug_rpm, os_rpm],
        "symlinks": [],
    }
    assert filter_files_list(dci_files_list, settings) == expected_files_list
def test_filter_files_list_with_debug_keep_os_folder_nrt():
    """NRT: --arch=ppc64le --debug keeps the ppc64le os/ folder, not just debug/."""
    # x86_64 entries: expected to be filtered out by the ppc64le arch filter.
    x86_debug_rpm = {
        "path": "AppStream/x86_64/debug/tree/Packages",
        "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
        "name": "PackageKit-command-not-found-debuginfo-1.1.12-2.el8.x86_64.rpm",
        "size": 45052,
    }
    x86_os_rpm = {
        "path": "AppStream/x86_64/os/Packages",
        "sha256": "8fe293470f677bfc6eb04204c47b5e1a0e5d15431ef7ed9dbb269aaea386ed9f",
        "name": "PackageKit-command-not-found-1.1.12-2.el8.x86_64.rpm",
        "size": 28616,
    }
    # ppc64le entries: both os/ and debug/ repodata must be kept.
    ppc_os_repomd = {
        "path": "CRB/ppc64le/os/repodata",
        "sha256": "b033f8c928c3a5ba43c0a0e87a839b5ce24698e8c0a7c9c4a33e564040231805",
        "name": "repomd.xml",
        "size": 3268,
    }
    ppc_debug_repomd = {
        "path": "CRB/ppc64le/debug/tree/repodata",
        "sha256": "974ebd02506a4945d4f1a7ac8ac5d5a05e675e0e4bd860de667425adf02d2570",
        "name": "repomd.xml",
        "size": 1562,
    }
    dci_files_list = {
        "directories": [],
        "files": [x86_debug_rpm, x86_os_rpm, ppc_os_repomd, ppc_debug_repomd],
        "symlinks": [],
    }
    args = ["RHEL-8", "/tmp", "--arch=ppc64le", "--debug"]
    settings = get_settings(sys_args=args)["topics"][0]
    expected_files_list = {
        "directories": [],
        "files": [ppc_os_repomd, ppc_debug_repomd],
        "symlinks": [],
    }
    assert filter_files_list(dci_files_list, settings) == expected_files_list
def test_filter_files_list_with_iso():
    """With --iso, ISO images are kept; debug trees are still dropped."""
    debug_rpm = {
        "path": "BaseOS/x86_64/debug/tree/Packages",
        "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
        "name": "PackageKit-command-not-found-debuginfo-1.1.12-2.el8.x86_64.rpm",
        "size": 45052,
    }
    boot_iso = {
        "path": "BaseOS/x86_64/iso",
        "sha256": "06fd27c0279d5b42078f7de66d056c7875d025d1eb89a29dd2777240459c1026",
        "name": "RHEL-8.4.0-20201020.n.2-BaseOS-x86_64-boot.iso",
        "size": 731906048,
    }
    os_rpm = {
        "path": "BaseOS/x86_64/os/Packages",
        "sha256": "8fe293470f677bfc6eb04204c47b5e1a0e5d15431ef7ed9dbb269aaea386ed9f",
        "name": "PackageKit-command-not-found-1.1.12-2.el8.x86_64.rpm",
        "size": 28616,
    }
    dci_files_list = {
        "directories": [],
        "files": [debug_rpm, boot_iso, os_rpm],
        "symlinks": [],
    }
    settings = get_settings(
        sys_args=["RHEL-8", "/tmp", "--variant", "BaseOS", "--iso"]
    )["topics"][0]
    expected_files_list = {
        "directories": [],
        "files": [boot_iso, os_rpm],
        "symlinks": [],
    }
    assert filter_files_list(dci_files_list, settings) == expected_files_list
def test_non_existing_variants_are_ignored():
    """Requesting a variant absent from the listing keeps only top-level metadata."""
    composeinfo = {
        "path": "",
        "sha256": "954719cab91afac5bc142656afff86e6d8e87570b035cbce65dbbb84892a40d3",
        "name": ".composeinfo",
        "size": 14496,
    }
    # None of the entries below belong to the requested "Server" variant,
    # so all of them must be filtered out.
    other_variant_files = [
        {
            "path": "AppStream/x86_64/debug/tree/Packages",
            "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
            "name": "PackageKit-command-not-found-debuginfo-1.1.12-2.el8.x86_64.rpm",
            "size": 45052,
        },
        {
            "path": "AppStream/x86_64/os/Packages",
            "sha256": "8fe293470f677bfc6eb04204c47b5e1a0e5d15431ef7ed9dbb269aaea386ed9f",
            "name": "PackageKit-command-not-found-1.1.12-2.el8.x86_64.rpm",
            "size": 28616,
        },
        {
            "path": "BaseOS/x86_64/os/Packages",
            "sha256": "7949b18b6d359b435686f2f5781928675ec8b2872b96f0abf6ba10747f794694",
            "name": "avahi-libs-0.7-19.el8.i686.rpm",
            "size": 68920,
        },
        {
            "path": "AppStream/s390x/os/Packages",
            "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
            "name": "PackageKit-command-not-found-debuginfo-1.1.12-2.el8.s390x.rpm",
            "size": 29562,
        },
    ]
    dci_files_list = {
        "directories": [],
        "files": [composeinfo] + other_variant_files,
        "symlinks": [],
    }
    settings = get_settings(sys_args=["RHEL-8", "/tmp", "--variant", "Server"])[
        "topics"
    ][0]
    expected_files_list = {
        "directories": [],
        "files": [composeinfo],
        "symlinks": [],
    }
    assert filter_files_list(dci_files_list, settings) == expected_files_list
def test_filter_files_list_download_everything():
    """With --all, every file passes the filter untouched."""
    all_files = [
        {
            "path": "",
            "sha256": "954719cab91afac5bc142656afff86e6d8e87570b035cbce65dbbb84892a40d3",
            "name": ".composeinfo",
            "size": 14496,
        },
        {
            "path": "AppStream/x86_64/debug/tree/Packages",
            "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
            "name": "PackageKit-command-not-found-debuginfo-1.1.12-2.el8.x86_64.rpm",
            "size": 45052,
        },
        {
            "path": "AppStream/x86_64/os/Packages",
            "sha256": "8fe293470f677bfc6eb04204c47b5e1a0e5d15431ef7ed9dbb269aaea386ed9f",
            "name": "PackageKit-command-not-found-1.1.12-2.el8.x86_64.rpm",
            "size": 28616,
        },
        {
            "path": "BaseOS/x86_64/os/Packages",
            "sha256": "7949b18b6d359b435686f2f5781928675ec8b2872b96f0abf6ba10747f794694",
            "name": "avahi-libs-0.7-19.el8.i686.rpm",
            "size": 68920,
        },
        {
            "path": "BaseOS/x86_64/iso",
            "sha256": "06fd27c0279d5b42078f7de66d056c7875d025d1eb89a29dd2777240459c1026",
            "name": "RHEL-8.4.0-20201020.n.2-BaseOS-x86_64-boot.iso",
            "size": 731906048,
        },
        {
            "path": "AppStream/s390x/os/Packages",
            "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
            "name": "PackageKit-command-not-found-debuginfo-1.1.12-2.el8.s390x.rpm",
            "size": 29562,
        },
    ]
    dci_files_list = {
        "directories": [],
        "files": list(all_files),
        "symlinks": [],
    }
    settings = get_settings(sys_args=["RHEL-8", "/tmp", "--all"])["topics"][0]
    # Expectation is the full input: nothing is filtered with --all.
    expected_files_list = {
        "directories": [],
        "files": list(all_files),
        "symlinks": [],
    }
    assert filter_files_list(dci_files_list, settings) == expected_files_list
def test_nrt_always_download_metadata():
    """NRT: files under metadata/ are kept even when a variant filter is active."""
    rpms_json = {
        "path": "metadata",
        "sha256": "6f48f0d285918e502035da74decf447c6bb29898206406a4ed6a92ece94d276a",
        "name": "rpms.json",
        "size": 45052,
    }
    dci_files_list = {
        "directories": [],
        "files": [rpms_json],
        "symlinks": [],
    }
    settings = get_settings(sys_args=["RHEL-8", "/tmp", "--variant", "AppStream"])[
        "topics"
    ][0]
    # The metadata entry must survive filtering unchanged.
    expected_files_list = {
        "directories": [],
        "files": [rpms_json],
        "symlinks": [],
    }
    assert filter_files_list(dci_files_list, settings) == expected_files_list
| 38.659314
| 93
| 0.518481
| 1,168
| 15,773
| 6.848459
| 0.083904
| 0.033129
| 0.057757
| 0.066008
| 0.969246
| 0.965996
| 0.96337
| 0.96337
| 0.945243
| 0.945243
| 0
| 0.267565
| 0.345781
| 15,773
| 407
| 94
| 38.7543
| 0.507607
| 0
| 0
| 0.683673
| 0
| 0.068878
| 0.45337
| 0.347683
| 0
| 0
| 0
| 0
| 0.017857
| 1
| 0.017857
| false
| 0
| 0.005102
| 0
| 0.022959
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6bcc482e2871a1cdbf9a933fd856b720281bda7d
| 24,554
|
py
|
Python
|
gui/tests/test_changeValues.py
|
a-bombarda/mvm-gui
|
e00c3fe39cf25c6fb2d2725891610da8885d1d76
|
[
"MIT"
] | null | null | null |
gui/tests/test_changeValues.py
|
a-bombarda/mvm-gui
|
e00c3fe39cf25c6fb2d2725891610da8885d1d76
|
[
"MIT"
] | null | null | null |
gui/tests/test_changeValues.py
|
a-bombarda/mvm-gui
|
e00c3fe39cf25c6fb2d2725891610da8885d1d76
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# from pytestqt import qt_compat
from pytestqt.qt_compat import qt_api
import pytest
import time
from .mvm_basics import *
from mainwindow import MainWindow
from start_stop_worker import StartStopWorker
from PyQt5.QtCore import QCoreApplication
"""
TS21
"""
def test_changePSV_RR(qtbot):
    '''
    Test the change of the RR (respiratory rate): sweeping the spinbox past
    the configured bounds must leave its value clamped inside [min, max].
    '''
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a new patient before touching settings.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Enter the menu and the PSV Settings tab
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    # Try to increase the value
    startingValue = window.settings._all_spinboxes['respiratory_rate'].value()
    i = startingValue
    oldValue = 0
    # Step upward until one step past the configured maximum.
    # NOTE(review): `or i == oldValue` only triggers when step is 0, which
    # would loop forever — presumably a safeguard misfire; confirm intent.
    while i <= int(config['respiratory_rate']['max'] + 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('respiratory_rate', i)
        oldValue = i
        i = i + int(config['respiratory_rate']['step'])
    # The spinbox must have clamped at the configured maximum.
    assert window.settings._all_spinboxes['respiratory_rate'].value() <= config['respiratory_rate']['max']
    # Try to decrease the value
    window._start_stop_worker._settings.update_spinbox_value('respiratory_rate', startingValue)
    i = startingValue
    oldValue = 0
    # Step downward until one step past the configured minimum.
    while i >= int(config['respiratory_rate']['min'] - 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('respiratory_rate', i)
        oldValue = i
        i = i - int(config['respiratory_rate']['step'])
    # The spinbox must have clamped at the configured minimum.
    assert window.settings._all_spinboxes['respiratory_rate'].value() >= config['respiratory_rate']['min']
"""
TS22
"""
def test_changePSV_PINSP(qtbot):
    '''
    Test the change of the Pinsp (inspiratory pressure): sweeping past the
    configured bounds must leave the spinbox clamped inside [min, max].
    '''
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a new patient before touching settings.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Enter the menu and the PSV Settings tab
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    # Try to increase the value
    startingValue = window.settings._all_spinboxes['insp_pressure'].value()
    i = startingValue
    oldValue = 0
    # Step upward until one step past the configured maximum.
    # NOTE(review): `or i == oldValue` is only reachable with step 0 — confirm.
    while i <= int(config['insp_pressure']['max'] + 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('insp_pressure', i)
        oldValue = i
        i = i + int(config['insp_pressure']['step'])
    # The spinbox must have clamped at the configured maximum.
    assert window.settings._all_spinboxes['insp_pressure'].value() <= config['insp_pressure']['max']
    # Try to decrease the value
    window._start_stop_worker._settings.update_spinbox_value('insp_pressure', startingValue)
    i = startingValue
    oldValue = 0
    # Step downward until one step past the configured minimum.
    while i >= int(config['insp_pressure']['min'] - 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('insp_pressure', i)
        oldValue = i
        i = i - int(config['insp_pressure']['step'])
    # The spinbox must have clamped at the configured minimum.
    assert window.settings._all_spinboxes['insp_pressure'].value() >= config['insp_pressure']['min']
"""
TS26
"""
def test_changePSV_RR_presets(qtbot):
    '''
    Clicking the RR preset button in the PSV Settings tab must make the
    preset chooser widget visible.
    '''
    assert qt_api.QApplication.instance() is not None
    fake_esp = FakeESP32Serial(config)
    qtbot.addWidget(fake_esp)
    assert config is not None
    main = MainWindow(config, fake_esp)
    qtbot.addWidget(main)
    main.show()
    # Start ventilation for a new patient.
    for btn in (main.button_new_patient, main.button_start_vent):
        qtbot.mouseClick(btn, QtCore.Qt.LeftButton)
    assert main.bottombar.currentWidget() == main.toolbar
    # Navigate: menu -> settings fork -> PSV Settings tab, checking each pane.
    qtbot.mouseClick(main.button_menu, QtCore.Qt.LeftButton)
    assert main.bottombar.currentWidget() == main.menu
    qtbot.mouseClick(main.button_settingsfork, QtCore.Qt.LeftButton)
    assert main.bottombar.currentWidget() == main.settingsfork
    qtbot.mouseClick(main.button_settings, QtCore.Qt.LeftButton)
    assert main.toppane.currentWidget() == main.settings
    # Open the RR preset chooser and verify it appeared.
    qtbot.mouseClick(main.settings.fake_btn_rr, QtCore.Qt.LeftButton)
    assert main.settings._current_preset.isVisible()
"""
TS21
"""
def test_changePSV_RR_2(qtbot):
    '''
    Smoke test: navigation from the toolbar into the PSV Settings tab
    lands on the expected widgets at every step.
    '''
    assert qt_api.QApplication.instance() is not None
    fake_esp = FakeESP32Serial(config)
    qtbot.addWidget(fake_esp)
    assert config is not None
    main = MainWindow(config, fake_esp)
    qtbot.addWidget(main)
    main.show()
    # Start ventilation for a new patient.
    for btn in (main.button_new_patient, main.button_start_vent):
        qtbot.mouseClick(btn, QtCore.Qt.LeftButton)
    assert main.bottombar.currentWidget() == main.toolbar
    # Navigate: menu -> settings fork -> PSV Settings tab, checking each pane.
    qtbot.mouseClick(main.button_menu, QtCore.Qt.LeftButton)
    assert main.bottombar.currentWidget() == main.menu
    qtbot.mouseClick(main.button_settingsfork, QtCore.Qt.LeftButton)
    assert main.bottombar.currentWidget() == main.settingsfork
    qtbot.mouseClick(main.button_settings, QtCore.Qt.LeftButton)
    assert main.toppane.currentWidget() == main.settings
"""
TS50
"""
def test_changePRM(qtbot):
    '''
    Test the change of the Pressure for Lung Recruitment: sweeping past the
    configured bounds must leave the spinbox clamped inside [min, max].
    '''
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a new patient before touching settings.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Enter the menu and the Mode Settings tab
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    # Try to increase the value
    startingValue = window.settings._all_spinboxes['lung_recruit_pres'].value()
    i = startingValue
    oldValue = 0
    # Step upward until one step past the configured maximum.
    # NOTE(review): `or i == oldValue` is only reachable with step 0 — confirm.
    while i <= int(config['lung_recruit_pres']['max'] + 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('lung_recruit_pres', i)
        oldValue = i
        i = i + int(config['lung_recruit_pres']['step'])
    # The spinbox must have clamped at the configured maximum.
    assert window.settings._all_spinboxes['lung_recruit_pres'].value() <= config['lung_recruit_pres']['max']
    # Try to decrease the value
    window._start_stop_worker._settings.update_spinbox_value('lung_recruit_pres', startingValue)
    i = startingValue
    oldValue = 0
    # Step downward until one step past the configured minimum.
    while i >= int(config['lung_recruit_pres']['min'] - 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('lung_recruit_pres', i)
        oldValue = i
        i = i - int(config['lung_recruit_pres']['step'])
    # The spinbox must have clamped at the configured minimum.
    assert window.settings._all_spinboxes['lung_recruit_pres'].value() >= config['lung_recruit_pres']['min']
"""
TS51
"""
def test_changeTRM(qtbot):
    '''
    Test the change of the Time for Lung Recruitment: sweeping past the
    configured bounds must leave the spinbox clamped inside [min, max].
    '''
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a new patient before touching settings.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Enter the menu and the Mode Settings tab
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    # Try to increase the value
    startingValue = window.settings._all_spinboxes['lung_recruit_time'].value()
    i = startingValue
    oldValue = 0
    # Step upward until one step past the configured maximum.
    # NOTE(review): `or i == oldValue` is only reachable with step 0 — confirm.
    while i <= int(config['lung_recruit_time']['max'] + 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('lung_recruit_time', i)
        oldValue = i
        i = i + int(config['lung_recruit_time']['step'])
    # The spinbox must have clamped at the configured maximum.
    assert window.settings._all_spinboxes['lung_recruit_time'].value() <= config['lung_recruit_time']['max']
    # Try to decrease the value
    window._start_stop_worker._settings.update_spinbox_value('lung_recruit_time', startingValue)
    i = startingValue
    oldValue = 0
    # Step downward until one step past the configured minimum.
    while i >= int(config['lung_recruit_time']['min'] - 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('lung_recruit_time', i)
        oldValue = i
        i = i - int(config['lung_recruit_time']['step'])
    # The spinbox must have clamped at the configured minimum.
    assert window.settings._all_spinboxes['lung_recruit_time'].value() >= config['lung_recruit_time']['min']
"""
TS52
"""
def test_change_ETS(qtbot):
    '''
    Test the change of the ETS Parameter (flow trigger): sweeping past the
    configured bounds must leave the spinbox clamped inside [min, max].
    At the current situation, the test cannot be executed, since the ETS
    parameter is not loaded from the default values.
    '''
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a new patient before touching settings.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Enter the menu and the Mode Settings tab
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    # Try to increase the value
    startingValue = window.settings._all_spinboxes['flow_trigger'].value()
    i = startingValue
    oldValue = 0
    # Step upward until one step past the configured maximum.
    # NOTE(review): `or i == oldValue` is only reachable with step 0 — confirm.
    while i <= int(config['flow_trigger']['max'] + 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('flow_trigger', i)
        oldValue = i
        i = i + int(config['flow_trigger']['step'])
    # The spinbox must have clamped at the configured maximum.
    assert window.settings._all_spinboxes['flow_trigger'].value() <= config['flow_trigger']['max']
    # Try to decrease the value
    window._start_stop_worker._settings.update_spinbox_value('flow_trigger', startingValue)
    i = startingValue
    oldValue = 0
    # Step downward until one step past the configured minimum.
    while i >= int(config['flow_trigger']['min'] - 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('flow_trigger', i)
        oldValue = i
        i = i - int(config['flow_trigger']['step'])
    # The spinbox must have clamped at the configured minimum.
    assert window.settings._all_spinboxes['flow_trigger'].value() >= config['flow_trigger']['min']
"""
TS58
"""
def test_change_ITS_PSV(qtbot):
    '''
    Test the change of the ITS Parameter (pressure trigger, PSV mode):
    sweeping past the configured bounds must leave the spinbox clamped
    inside [min, max].
    At the current situation, the test cannot be executed, since the ITS
    parameter is not loaded from the default values.
    '''
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a new patient before touching settings.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Enter the menu and the Mode Settings tab
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    # Try to increase the value
    startingValue = window.settings._all_spinboxes['pressure_trigger'].value()
    i = startingValue
    oldValue = 0
    # Step upward until one step past the configured maximum.
    # NOTE(review): `or i == oldValue` is only reachable with step 0 — confirm.
    while i <= int(config['pressure_trigger']['max'] + 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('pressure_trigger', i)
        oldValue = i
        i = i + int(config['pressure_trigger']['step'])
    # The spinbox must have clamped at the configured maximum.
    assert window.settings._all_spinboxes['pressure_trigger'].value() <= config['pressure_trigger']['max']
    # Try to decrease the value
    window._start_stop_worker._settings.update_spinbox_value('pressure_trigger', startingValue)
    i = startingValue
    oldValue = 0
    # Step downward until one step past the configured minimum.
    while i >= int(config['pressure_trigger']['min'] - 1) or i == oldValue:
        window._start_stop_worker._settings.update_spinbox_value('pressure_trigger', i)
        oldValue = i
        i = i - int(config['pressure_trigger']['step'])
    # The spinbox must have clamped at the configured minimum.
    assert window.settings._all_spinboxes['pressure_trigger'].value() >= config['pressure_trigger']['min']
"""
TS59
"""
def test_change_ITS_PCV(qtbot):
    """
    Test changing the ITS (trigger pressure) parameter in PCV mode.

    NOTE(original): at the current situation this test cannot be fully
    exercised, since the ITS parameter is not loaded from the defaults.
    """
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a fresh patient.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Navigate: menu -> settings fork -> settings pane.
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    worker_settings = window._start_stop_worker._settings
    param_cfg = config['pcv_trigger_pressure']
    step = int(param_cfg['step'])
    # Walk the value upwards past the maximum: the spinbox must clamp it.
    initial = window.settings._all_spinboxes['pcv_trigger_pressure'].value()
    current, previous = initial, 0
    while current <= int(param_cfg['max'] + 1) or current == previous:
        worker_settings.update_spinbox_value('pcv_trigger_pressure', current)
        previous = current
        current += step
    assert window.settings._all_spinboxes['pcv_trigger_pressure'].value() <= param_cfg['max']
    # Walk the value downwards past the minimum: the spinbox must clamp it.
    worker_settings.update_spinbox_value('pcv_trigger_pressure', initial)
    current, previous = initial, 0
    while current >= int(param_cfg['min'] - 1) or current == previous:
        worker_settings.update_spinbox_value('pcv_trigger_pressure', current)
        previous = current
        current -= step
    assert window.settings._all_spinboxes['pcv_trigger_pressure'].value() >= param_cfg['min']
"""
TS60
"""
def test_change_apenea_rr(qtbot):
    """
    Test changing the apnea respiratory-rate parameter: values pushed
    beyond the configured range must be clamped by the spinbox.
    """
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a fresh patient.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Navigate: menu -> settings fork -> settings pane.
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    worker_settings = window._start_stop_worker._settings
    param_cfg = config['apnea_rr']
    step = int(param_cfg['step'])
    # Walk the value upwards past the maximum: the spinbox must clamp it.
    initial = window.settings._all_spinboxes['apnea_rr'].value()
    current, previous = initial, 0
    while current <= int(param_cfg['max'] + 1) or current == previous:
        worker_settings.update_spinbox_value('apnea_rr', current)
        previous = current
        current += step
    assert window.settings._all_spinboxes['apnea_rr'].value() <= param_cfg['max']
    # Walk the value downwards past the minimum: the spinbox must clamp it.
    worker_settings.update_spinbox_value('apnea_rr', initial)
    current, previous = initial, 0
    while current >= int(param_cfg['min'] - 1) or current == previous:
        worker_settings.update_spinbox_value('apnea_rr', current)
        previous = current
        current -= step
    assert window.settings._all_spinboxes['apnea_rr'].value() >= param_cfg['min']
"""
TS61
"""
def test_change_apenea_pinsp(qtbot):
    """
    Test changing the apnea inspiratory-pressure parameter: values pushed
    beyond the configured range must be clamped by the spinbox.
    """
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a fresh patient.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Navigate: menu -> settings fork -> settings pane.
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    worker_settings = window._start_stop_worker._settings
    param_cfg = config['apnea_insp_press']
    step = int(param_cfg['step'])
    # Walk the value upwards past the maximum: the spinbox must clamp it.
    initial = window.settings._all_spinboxes['apnea_insp_press'].value()
    current, previous = initial, 0
    while current <= int(param_cfg['max'] + 1) or current == previous:
        worker_settings.update_spinbox_value('apnea_insp_press', current)
        previous = current
        current += step
    assert window.settings._all_spinboxes['apnea_insp_press'].value() <= param_cfg['max']
    # Walk the value downwards past the minimum: the spinbox must clamp it.
    worker_settings.update_spinbox_value('apnea_insp_press', initial)
    current, previous = initial, 0
    while current >= int(param_cfg['min'] - 1) or current == previous:
        worker_settings.update_spinbox_value('apnea_insp_press', current)
        previous = current
        current -= step
    assert window.settings._all_spinboxes['apnea_insp_press'].value() >= param_cfg['min']
"""
TS53
"""
def test_change_ApneaLag(qtbot):
    """
    Test the change of the Apnea Lag parameter (max_apnea_time): values
    pushed beyond the configured range must be clamped by the spinbox.
    """
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a fresh patient.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Navigate: menu -> settings fork -> settings pane.
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    worker_settings = window._start_stop_worker._settings
    param_cfg = config['max_apnea_time']
    step = int(param_cfg['step'])
    # Walk the value upwards past the maximum: the spinbox must clamp it.
    initial = window.settings._all_spinboxes['max_apnea_time'].value()
    current, previous = initial, 0
    while current <= int(param_cfg['max'] + 1) or current == previous:
        worker_settings.update_spinbox_value('max_apnea_time', current)
        previous = current
        current += step
    assert window.settings._all_spinboxes['max_apnea_time'].value() <= param_cfg['max']
    # Walk the value downwards past the minimum: the spinbox must clamp it.
    worker_settings.update_spinbox_value('max_apnea_time', initial)
    current, previous = initial, 0
    while current >= int(param_cfg['min'] - 1) or current == previous:
        worker_settings.update_spinbox_value('max_apnea_time', current)
        previous = current
        current -= step
    assert window.settings._all_spinboxes['max_apnea_time'].value() >= param_cfg['min']
"""
TS54
"""
def test_changePCV_IE(qtbot):
    """
    Test the change of the I:E (inspiration/expiration ratio) parameter:
    values pushed beyond the configured range must be clamped.
    Uses float arithmetic since the I:E step is fractional.
    """
    assert qt_api.QApplication.instance() is not None
    esp32 = FakeESP32Serial(config)
    qtbot.addWidget(esp32)
    assert config is not None
    window = MainWindow(config, esp32)
    qtbot.addWidget(window)
    window.show()
    # Start ventilation for a fresh patient.
    qtbot.mouseClick(window.button_new_patient, QtCore.Qt.LeftButton)
    qtbot.mouseClick(window.button_start_vent, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.toolbar
    # Navigate: menu -> PSV settings fork -> settings pane.
    qtbot.mouseClick(window.button_menu, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.menu
    qtbot.mouseClick(window.button_settingsfork, QtCore.Qt.LeftButton)
    assert window.bottombar.currentWidget() == window.settingsfork
    qtbot.mouseClick(window.button_settings, QtCore.Qt.LeftButton)
    assert window.toppane.currentWidget() == window.settings
    worker_settings = window._start_stop_worker._settings
    param_cfg = config['insp_expir_ratio']
    step = float(param_cfg['step'])
    # Walk the value upwards past the maximum: the spinbox must clamp it.
    initial = window.settings._all_spinboxes['insp_expir_ratio'].value()
    current, previous = initial, 0
    while current <= float(param_cfg['max'] + step) or current == previous:
        worker_settings.update_spinbox_value('insp_expir_ratio', current)
        previous = current
        current += step
    assert window.settings._all_spinboxes['insp_expir_ratio'].value() <= param_cfg['max']
    # Walk the value downwards past the minimum: the spinbox must clamp it.
    worker_settings.update_spinbox_value('insp_expir_ratio', initial)
    current, previous = initial, 0
    while current >= float(param_cfg['min'] - step) or current == previous:
        worker_settings.update_spinbox_value('insp_expir_ratio', current)
        previous = current
        current -= step
    assert window.settings._all_spinboxes['insp_expir_ratio'].value() >= param_cfg['min']
| 37.372907
| 120
| 0.716543
| 3,036
| 24,554
| 5.592227
| 0.045125
| 0.05301
| 0.081635
| 0.103369
| 0.964248
| 0.958476
| 0.950348
| 0.947874
| 0.945164
| 0.944104
| 0
| 0.008572
| 0.168567
| 24,554
| 657
| 121
| 37.372907
| 0.823071
| 0.077218
| 0
| 0.729064
| 0
| 0
| 0.100498
| 0
| 0
| 0
| 0
| 0
| 0.248768
| 1
| 0.03202
| false
| 0
| 0.017241
| 0
| 0.049261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d4401d3547dd7d64c9b576bae30550129023dbb8
| 33,802
|
py
|
Python
|
intersight/apis/cvd_template_api.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 21
|
2018-03-29T14:20:35.000Z
|
2021-10-13T05:11:41.000Z
|
intersight/apis/cvd_template_api.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 14
|
2018-01-30T15:45:46.000Z
|
2022-02-23T14:23:21.000Z
|
intersight/apis/cvd_template_api.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 18
|
2018-01-03T15:09:56.000Z
|
2021-07-16T02:21:54.000Z
|
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-961
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class CvdTemplateApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def cvd_templates_get(self, **kwargs):
"""
Get a list of 'cvdTemplate' instances
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cvd_templates_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool count: The $count query option allows clients to request a count of the matching resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response
:param int top: The max number of documents to return.
:param int skip: The number of documents to skip.
:param str filter: Filter criteria for documents to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London')
:param str select: Specifies a subset of properties to return.
:param str orderby: Determines what values are used to order a collection of documents.
:param str expand: Specify additional attributes or related documents to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames
:param str apply: Specify one or more transformation operations to perform aggregation on documents. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. Query examples: $apply=groupby((Model), aggregate($count as Total)) $apply=groupby((Model), aggregate(AvailableMemory with average as AverageAvailableMemory))
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for documents to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: at=VersionType eq 'Configured' at=InterestedMos.Moid eq '5b5877e56c6730367acf46cd'
:return: CvdTemplateList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cvd_templates_get_with_http_info(**kwargs)
else:
(data) = self.cvd_templates_get_with_http_info(**kwargs)
return data
def cvd_templates_get_with_http_info(self, **kwargs):
"""
Get a list of 'cvdTemplate' instances
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cvd_templates_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool count: The $count query option allows clients to request a count of the matching resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response
:param int top: The max number of documents to return.
:param int skip: The number of documents to skip.
:param str filter: Filter criteria for documents to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London')
:param str select: Specifies a subset of properties to return.
:param str orderby: Determines what values are used to order a collection of documents.
:param str expand: Specify additional attributes or related documents to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames
:param str apply: Specify one or more transformation operations to perform aggregation on documents. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. Query examples: $apply=groupby((Model), aggregate($count as Total)) $apply=groupby((Model), aggregate(AvailableMemory with average as AverageAvailableMemory))
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for documents to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: at=VersionType eq 'Configured' at=InterestedMos.Moid eq '5b5877e56c6730367acf46cd'
:return: CvdTemplateList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['count', 'inlinecount', 'top', 'skip', 'filter', 'select', 'orderby', 'expand', 'apply', 'at']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cvd_templates_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'count' in params:
query_params.append(('$count', params['count']))
if 'inlinecount' in params:
query_params.append(('$inlinecount', params['inlinecount']))
if 'top' in params:
query_params.append(('$top', params['top']))
if 'skip' in params:
query_params.append(('$skip', params['skip']))
if 'filter' in params:
query_params.append(('$filter', params['filter']))
if 'select' in params:
query_params.append(('$select', params['select']))
if 'orderby' in params:
query_params.append(('$orderby', params['orderby']))
if 'expand' in params:
query_params.append(('$expand', params['expand']))
if 'apply' in params:
query_params.append(('$apply', params['apply']))
if 'at' in params:
query_params.append(('at', params['at']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/cvd/Templates', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CvdTemplateList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def cvd_templates_moid_delete(self, moid, **kwargs):
"""
Delete an instance of 'cvdTemplate'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cvd_templates_moid_delete(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the cvdTemplate instance. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cvd_templates_moid_delete_with_http_info(moid, **kwargs)
else:
(data) = self.cvd_templates_moid_delete_with_http_info(moid, **kwargs)
return data
def cvd_templates_moid_delete_with_http_info(self, moid, **kwargs):
"""
Delete an instance of 'cvdTemplate'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cvd_templates_moid_delete_with_http_info(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the cvdTemplate instance. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cvd_templates_moid_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `cvd_templates_moid_delete`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/cvd/Templates/{moid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def cvd_templates_moid_get(self, moid, **kwargs):
"""
Get a specific instance of 'cvdTemplate'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cvd_templates_moid_get(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the cvdTemplate instance. (required)
:return: CvdTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cvd_templates_moid_get_with_http_info(moid, **kwargs)
else:
(data) = self.cvd_templates_moid_get_with_http_info(moid, **kwargs)
return data
def cvd_templates_moid_get_with_http_info(self, moid, **kwargs):
"""
Get a specific instance of 'cvdTemplate'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cvd_templates_moid_get_with_http_info(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the cvdTemplate instance. (required)
:return: CvdTemplate
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cvd_templates_moid_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `cvd_templates_moid_get`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/cvd/Templates/{moid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CvdTemplate',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def cvd_templates_moid_patch(self, moid, body, **kwargs):
"""
Update an instance of 'cvdTemplate'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cvd_templates_moid_patch(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the cvdTemplate instance. (required)
:param CvdTemplate body: cvdTemplate to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cvd_templates_moid_patch_with_http_info(moid, body, **kwargs)
else:
(data) = self.cvd_templates_moid_patch_with_http_info(moid, body, **kwargs)
return data
def cvd_templates_moid_patch_with_http_info(self, moid, body, **kwargs):
"""
Update an instance of 'cvdTemplate'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cvd_templates_moid_patch_with_http_info(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the cvdTemplate instance. (required)
:param CvdTemplate body: cvdTemplate to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cvd_templates_moid_patch" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `cvd_templates_moid_patch`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `cvd_templates_moid_patch`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/cvd/Templates/{moid}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def cvd_templates_moid_post(self, moid, body, **kwargs):
"""
Update an instance of 'cvdTemplate'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cvd_templates_moid_post(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the cvdTemplate instance. (required)
:param CvdTemplate body: cvdTemplate to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.cvd_templates_moid_post_with_http_info(moid, body, **kwargs)
else:
(data) = self.cvd_templates_moid_post_with_http_info(moid, body, **kwargs)
return data
def cvd_templates_moid_post_with_http_info(self, moid, body, **kwargs):
"""
Update an instance of 'cvdTemplate'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.cvd_templates_moid_post_with_http_info(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the cvdTemplate instance. (required)
:param CvdTemplate body: cvdTemplate to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cvd_templates_moid_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `cvd_templates_moid_post`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `cvd_templates_moid_post`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/cvd/Templates/{moid}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def cvd_templates_post(self, body, **kwargs):
    """
    Create an instance of 'cvdTemplate'
    Synchronous by default; to make an asynchronous HTTP request instead,
    pass a `callback` function to be invoked when the response arrives.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.cvd_templates_post(body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param CvdTemplate body: cvdTemplate to add (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always wants just the response data, not
    # the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: hand the request thread straight back.
        return self.cvd_templates_post_with_http_info(body, **kwargs)
    # Synchronous path: block until the response data is available.
    data = self.cvd_templates_post_with_http_info(body, **kwargs)
    return data
def cvd_templates_post_with_http_info(self, body, **kwargs):
    """
    Create an instance of 'cvdTemplate'
    Synchronous by default; to make an asynchronous HTTP request instead,
    pass a `callback` function to be invoked when the response arrives.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.cvd_templates_post_with_http_info(body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param CvdTemplate body: cvdTemplate to add (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Every keyword argument this endpoint accepts.
    all_params = ['body', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = locals()
    for name, value in iteritems(params['kwargs']):
        if name not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method cvd_templates_post" % name
            )
        params[name] = value
    del params['kwargs']
    # verify the required parameter 'body' is set
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `cvd_templates_post`")

    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}
    # HTTP `Accept` and `Content-Type` headers.
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),
        'Content-Type': self.api_client.select_header_content_type(['application/json']),
    }
    body_params = params.get('body')
    # No authentication is configured for this endpoint.
    auth_settings = []
    return self.api_client.call_api('/cvd/Templates', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| 47.408135
| 820
| 0.598426
| 3,672
| 33,802
| 5.327614
| 0.073529
| 0.049072
| 0.031079
| 0.022083
| 0.9565
| 0.93958
| 0.931452
| 0.92312
| 0.918315
| 0.914839
| 0
| 0.001794
| 0.323945
| 33,802
| 712
| 821
| 47.474719
| 0.85428
| 0.41255
| 0
| 0.75
| 1
| 0
| 0.155136
| 0.040237
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036932
| false
| 0
| 0.019886
| 0
| 0.110795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d451a8fbccd24b9acd0061311d46dae703e874e7
| 49
|
py
|
Python
|
tf/example/passwd.py
|
tolkien/misc
|
84651346a3a0053b6a2af31db26c227a34da33c8
|
[
"MIT"
] | null | null | null |
tf/example/passwd.py
|
tolkien/misc
|
84651346a3a0053b6a2af31db26c227a34da33c8
|
[
"MIT"
] | null | null | null |
tf/example/passwd.py
|
tolkien/misc
|
84651346a3a0053b6a2af31db26c227a34da33c8
|
[
"MIT"
] | null | null | null |
# Print a salted password hash produced by the Jupyter `notebook` package.
# NOTE(review): `passwd()` is called with no arguments, so it presumably
# prompts interactively for the password — confirm against notebook.auth docs.
from notebook.auth import passwd
print(passwd())
| 16.333333
| 32
| 0.795918
| 7
| 49
| 5.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 49
| 2
| 33
| 24.5
| 0.886364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
|
0
| 7
|
d486765c43907caa230896762051c7787df201de
| 55,706
|
py
|
Python
|
write_presets.py
|
palfrey/autocovid
|
264cba63ae90205058be6b602d5955b44f61d172
|
[
"MIT"
] | null | null | null |
write_presets.py
|
palfrey/autocovid
|
264cba63ae90205058be6b602d5955b44f61d172
|
[
"MIT"
] | 1
|
2020-11-05T15:54:31.000Z
|
2020-11-05T15:54:31.000Z
|
write_presets.py
|
palfrey/autocovid
|
264cba63ae90205058be6b602d5955b44f61d172
|
[
"MIT"
] | 1
|
2020-10-31T22:35:44.000Z
|
2020-10-31T22:35:44.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
gen_presets.py
Generate preset (.pickle) files
Created on Wed Oct 7 13:55:28 2020
"""
import os, pickle, numpy as np
# Output folder for the generated .pickle preset files.
preset_folder = "presets"
# Background image filename shared by write_pickle(); "" means no background.
background_file = ""
# exist_ok=True avoids the check-then-create race of isdir() + makedirs().
os.makedirs(preset_folder, exist_ok=True)
def write_pickle():
    """Serialize the current module-level preset settings to
    presets/<short_name>.pickle.

    Reads the ~57 module globals assigned by the preset sections below and
    dumps them as one positional list. The order of d_list IS the on-disk
    format: loaders must unpack in exactly this order, so do not reorder.
    """
    d_list=[short_name,sqrt_rates,plot_risk_weighted_ltla,plot_risk_weighted_ltla_binned,ltla_vmax,plot_classified_ltla,ltla_classifier_mode,ltla_classifier_bins,footer_message,plot_ranks,plot_relative,relative_days,plot_msoa_boundaries,target_places,colour_map,msoa_colour_map,lsoa_colour_map,msoa_alpha,lsoa_alpha,frame_margins,label_x,label_y,title_x,title_y,plot_wales,plot_scotland,plot_towns,
            plot_laa,title_string,laa_linewidth,standalone_plot,post_process,resize_output,
            heat_lim,transparent,add_date,add_background,add_overlay,add_title,target_width,
            target_height,plot_laa_names,plot_laa_values,plot_ltla_data,plot_msoa_data,plot_lsoa_data,plot_combined_data,text_align_mode,
            date_font_size,title_font_size,laa_fontsize,mask_colour,add_footer,restrict_laa_to_targets,f_scale,overlay_filenames,overlay_positions,background_file]
    # One pickle per preset, named after short_name, in binary mode.
    with open(preset_folder+os.path.sep+short_name+".pickle","wb") as f: pickle.dump(d_list,f)
# Preset sections: each run of assignments sets the module globals read by
# write_pickle(), then snapshots them. Values not reassigned in a section
# carry over from the previous one — order of sections matters.
sqrt_rates = False  # set once here; shared by every preset below
#Default (England, combined data, with background)
short_name="default"
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
# frame_margins: presumably [x_min, x_max, y_min, y_max] map coordinates — confirm
frame_margins = [133000,658000,10600,655000]
label_x=650000
label_y=576000
title_x=650000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Covid-19 Heatmap"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'right'
date_font_size = 80
title_font_size = 70
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 3.3
overlay_filenames = []
overlay_positions = []
ltla_vmax = 200  # only assigned here; all later presets inherit this value
write_pickle()
#LTLA Heatmap - based on PHE England maps
short_name="london-phe"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [0.1,5,10,15,30,45,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [502000,562000,153000,205000]
label_x=502000
label_y=192000
title_x=502000
title_y=195000
plot_msoa_boundaries=False
plot_wales=False
plot_scotland=False
plot_towns=False
plot_laa = True
title_string = ""
laa_linewidth= 4
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = False
add_background = False
add_overlay = True
add_title = False
target_width = 606
target_height = 486
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 64
laa_fontsize = 14
mask_colour='#EEEEEEAA'
add_footer = False
restrict_laa_to_targets = False
f_scale = 2.3
overlay_filenames = ['phe_london_overlay.png']
overlay_positions = [[0,0]]
background_file = "ltla-bg.png"
write_pickle()
#LTLA Heatmap - based on PHE England maps (whole-country frame, embeds the
#london-phe plot output as an overlay)
short_name="ltla-phe"
footer_message = ""
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [0.1,5,10,15,30,45,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [0,660000,8000,668000]
label_x=545000
label_y=596000
title_x=395500
title_y=675000
plot_msoa_boundaries=False
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Covid-19 Case Rate in LTLA Areas"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = False
target_width = 1600
target_height = 1600
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 64
laa_fontsize = 14
mask_colour='#EEEEEEAA'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.1
overlay_filenames = ['ccr-7bin.png','plots/london-phe/']
overlay_positions = [[0,0],[40,190]]
background_file = "ltla-bg.png"
write_pickle()
# Presets: PHE-style London map with extended bins, plus two "doubling"
# variants whose bins are powers of two.
#LTLA Heatmap - based on PHE England maps, extended by 3 bins
short_name="london-phex"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [0.1,5,10,15,30,45,90,180,360,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [502000,562000,153000,205000]
label_x=502000
label_y=192000
title_x=502000
title_y=195000
plot_msoa_boundaries=False
plot_wales=False
plot_scotland=False
plot_towns=False
plot_laa = True
title_string = ""
laa_linewidth= 4
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = False
add_background = False
add_overlay = True
add_title = False
target_width = 606
target_height = 486
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 64
laa_fontsize = 14
mask_colour='#EEEEEEAA'
add_footer = False
restrict_laa_to_targets = False
f_scale = 2.3
overlay_filenames = ['phe_london_overlay.png']
overlay_positions = [[0,0]]
background_file = "ltla-bg.png"
write_pickle()
#LTLA Heatmap - doubling bins
short_name="doubling-london"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [1,2,4,8,16,32,64,128,256,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [502000,562000,153000,205000]
label_x=502000
label_y=192000
title_x=502000
title_y=195000
plot_msoa_boundaries=False
plot_wales=False
plot_scotland=False
plot_towns=False
plot_laa = True
title_string = ""
laa_linewidth= 4
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = False
add_background = False
add_overlay = True
add_title = False
target_width = 606
target_height = 486
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 64
laa_fontsize = 14
mask_colour='#EEEEEEAA'
add_footer = False
restrict_laa_to_targets = False
f_scale = 2.3
overlay_filenames = ['phe_london_overlay.png']
overlay_positions = [[0,0]]
background_file = "ltla-bg.png"
write_pickle()
#LTLA Heatmap - doubling bins (larger frame with LAA names/values labelled)
short_name="doubling-london-big"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [1,2,4,8,16,32,64,128,256,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [502000,562000,153000,205000]
label_x=560000
label_y=201100
title_x=502000
title_y=193000
plot_msoa_boundaries=False
plot_wales=False
plot_scotland=False
plot_towns=False
plot_laa = True
title_string = ""
laa_linewidth= 4
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = False
target_width = 1348
target_height = 1080
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 76
title_font_size = 64
laa_fontsize = 22
mask_colour='#EEEEEEAA'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.16
overlay_filenames = ['london-overlay.png']
overlay_positions = [[0,0]]
background_file = "ltla-bg.png"
write_pickle()
# Presets for the Autocovid outputs (transparent crops) and the national
# "doubling-bin" / extended-bin LTLA maps.
#LTLA Heatmap - doubling bins - London area - for Autocovid
short_name="autocovid_london"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [1,2,4,8,16,32,64,128,256,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [502000,562000,153000,205000]
label_x=502000
label_y=192000
title_x=502000
title_y=195000
plot_msoa_boundaries=False
plot_wales=False
plot_scotland=False
plot_towns=False
plot_laa = True
title_string = ""
laa_linewidth= 4
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = True
add_date = False
add_background = False
add_overlay = True
add_title = False
target_width = 280
target_height = 225
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 64
laa_fontsize = 14
mask_colour='#EEEEEEAA'
add_footer = False
restrict_laa_to_targets = False
f_scale = 2.3
overlay_filenames = ['autocovid_london_overlay.png']
overlay_positions = [[0,0]]
background_file = "ltla-bg.png"
write_pickle()
#LTLA Heatmap - doubling bins - for Autocovid
short_name="autocovid_map"
footer_message = ""
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [1,2,4,8,16,32,64,128,256,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,660000,8000,668000]
label_x=545000
label_y=596000
title_x=395500
title_y=675000
plot_msoa_boundaries=False
plot_wales=False
plot_scotland=False
plot_towns=False
plot_laa = True
title_string = ""
laa_linewidth= 1.5
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = True
add_date = False
add_background = False
add_overlay = False
add_title = False
target_width = 865
target_height = 1060
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 64
laa_fontsize = 14
mask_colour='#EEEEEEAA'
add_footer = False
restrict_laa_to_targets = False
f_scale = 2.3
overlay_filenames = ['phe_london_overlay.png']
overlay_positions = [[0,0]]
background_file = "ltla-bg.png"
write_pickle()
#LTLA Heatmap - based on PHE England maps - extended by 3 bins
short_name="doubling-bin"
footer_message = ""
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [1,2,4,8,16,32,64,128,256,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [0,660000,8000,668000]
label_x=545000
label_y=596000
title_x=395500
title_y=675000
plot_msoa_boundaries=False
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Covid-19 Case Rate in LTLA Areas"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = False
target_width = 1600
target_height = 1600
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 64
laa_fontsize = 14
mask_colour='#EEEEEEAA'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.1
overlay_filenames = ['doubling.png','plots/doubling-london/']
overlay_positions = [[0,0],[40,190]]
background_file = "ltla-bg.png"
write_pickle()
#LTLA Heatmap - based on PHE England maps - extended by 3 bins
short_name="ltla-phex"
footer_message = ""
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [0.1,5,10,15,30,45,90,180,360,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [0,660000,8000,668000]
label_x=545000
label_y=596000
title_x=395500
title_y=675000
plot_msoa_boundaries=False
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Covid-19 Case Rate in LTLA Areas"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = False
target_width = 1600
target_height = 1600
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 64
laa_fontsize = 14
mask_colour='#EEEEEEAA'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.1
overlay_filenames = ['ccr-10bin.png','plots/london-phex/']
overlay_positions = [[0,0],[40,190]]
background_file = "ltla-bg.png"
write_pickle()
# Presets: block-style LTLA map, plain LTLA heatmap, rank map, and the
# background-composited heatmap.
#LTLA Heatmap - blocks
short_name="ltla-q"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [0.1,5,10,15,30,45,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='rainbow'
#Approximate PHE Colour map
#Bins 0-5 Pale Blue [230 248 248]
#c1 = np.array([217/256, 246/256, 193/256, 1])
#c2 =
# 10-15 [244 237 126]
# 15-30 [255 170 35]
# 30-45 [251 43 29]
# 45+ [170 17 11]
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [132000,659000,9600,695000]
label_x=545000
label_y=596000
title_x=395500
title_y=675000
plot_msoa_boundaries=False
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Covid-19 Case Rate in LTLA Areas"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1080
target_height = 1400
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 64
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.72
overlay_filenames = []
overlay_positions = []
background_file = "ltla-bg.png"
write_pickle()
#LTLA Heatmap
short_name="ltla"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='YlGnBu'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [132000,659000,9600,675000]
label_x=545000
label_y=596000
title_x=650000
title_y=605000
plot_msoa_boundaries=False
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Covid-19 LTLA Heatmap"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = True
add_date = True
add_background = True
add_overlay = True
add_title = False
target_width = 1080
target_height = 1360
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = True
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 54
laa_fontsize = 14
mask_colour='#406080'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.72
# NOTE(review): overlay_filenames is non-empty while overlay_positions is
# empty here — confirm the renderer tolerates the mismatch.
overlay_positions = []
overlay_filenames = ['ltla-fg.png']
background_file = "ltla-bg.png"
write_pickle()
#LTLA Ranked Heatmap
short_name="rank"
footer_message = "Based on average case rate from LTLA case data @ coronavirus.data.gov.uk."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = True
plot_relative = False
relative_days = 7
target_places = []
colour_map='bwr'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [132000,659000,9600,675000]
label_x=542000
label_y=596000
title_x=541000
title_y=655000
plot_msoa_boundaries=False
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "LTLAs ranked by C19 Case Rate"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1080
target_height = 1360
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'center'
date_font_size = 76
title_font_size = 42
laa_fontsize = 14
mask_colour='#406080'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.5
overlay_positions = []
overlay_filenames = []
background_file = "ltla-bg.png"
write_pickle()
#Heatmap with background image
short_name="heatmap"
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=547000
label_y=576000
title_x=650000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Covid-19 Heatmap"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = True
add_date = True
add_background = True
add_overlay = False
add_title = False
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'center'
date_font_size = 64
title_font_size = 70
laa_fontsize = 14
mask_colour='#122B49'
add_footer = False
restrict_laa_to_targets = False
f_scale = 3.3
overlay_positions = []
overlay_filenames = []
background_file = "heatmap.png"
write_pickle()
# Presets: 7/14/28-day rate-of-change plots (identical apart from
# relative_days, title and footer text) and the MSOA-only plot.
#Rate of change plot - 7 day
short_name="relative7"
footer_message = "Compares daily case rate in a given LTLA with same value 7 days earlier. Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = True
relative_days = 7
target_places = []
colour_map='bwr'
msoa_colour_map='winter'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "7-day rate of change in C19 Cases"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 70
title_font_size = 36
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.6
overlay_positions = [[32,340]]
overlay_filenames = ['relative_key.png']
background_file=''
write_pickle()
#Rate of change plot - 14 day
short_name="relative14"
footer_message = "Compares daily case rate in a given LTLA with same value 14 days earlier. Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = True
relative_days = 14
target_places = []
colour_map='bwr'
msoa_colour_map='winter'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "14-day rate of change in C19 Cases"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 70
title_font_size = 36
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.6
overlay_positions = [[32,340]]
overlay_filenames = ['relative_key.png']
background_file=''
write_pickle()
#Rate of change plot - 28 day (header comment previously said 14 day)
short_name="relative28"
footer_message = "Compares daily case rate in a given LTLA with same value 28 days earlier. Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = True
relative_days = 28
target_places = []
colour_map='bwr'
msoa_colour_map='winter'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "28-day rate of change in C19 Cases"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 70
title_font_size = 36
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.6
overlay_positions = [[32,340]]
overlay_filenames = ['relative_key.png']
background_file=''
write_pickle()
#MSOA Only Plot
# NOTE(review): relative_days is not reset in this section, so the value 28
# from the relative28 preset is carried into this pickle — confirm intended.
short_name="msoa"
footer_message = "Based on MSOA case data from coronavirus.data.gov.uk."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = False
target_places = []
colour_map='YlOrRd'
msoa_colour_map='winter'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "MSOA Covid Outbreaks in England"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = True
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 70
title_font_size = 38
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 1.8
overlay_positions = []
overlay_filenames = []
background_file=''
write_pickle()
# Presets: LSOA-only plot and the combined LSOA-over-MSOA/LTLA plot.
# NOTE(review): relative_days is not reset in either section; the value 28
# from the relative28 preset carries into these pickles — confirm intended.
#LSOA Only Plot
short_name="lsoa"
footer_message = "Based on LSOA case data from coronavirus.data.gov.uk."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = False
target_places = []
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='summer'
msoa_alpha=1
lsoa_alpha=1
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "LSOA Covid Outbreaks in England"
laa_linewidth= 0.5
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = True
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 70
title_font_size = 38
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 1.8
overlay_positions = []
overlay_filenames = []
background_file=''
write_pickle()
#LSOA data on top of MSOA\LTLA combi Plot
short_name="combined"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
footer_message = "Based on LTLA, MSOA and LSOA data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
target_places = []
colour_map='Oranges'
msoa_colour_map='YlOrRd'
# NOTE(review): 'Autumn' is capitalized unlike the other colormap names
# ('winter', 'summer', 'jet'); matplotlib's colormap is 'autumn' — confirm
# the renderer accepts this spelling.
lsoa_colour_map='Autumn'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Local Covid Outbreaks in England"
laa_linewidth= 0.5
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = True
plot_combined_data = True
text_align_mode = 'right'
date_font_size = 66
title_font_size = 38
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2
overlay_positions = []
overlay_filenames = []
background_file=''
write_pickle()
#East England
short_name="east"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
target_places = []
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=1
frame_margins = [500000,659000,160000,360000]
label_x=655000
label_y=193000
title_x=655000
title_y=350000
plot_msoa_boundaries=True
plot_wales=False
plot_scotland=False
plot_towns=True
plot_laa = True
title_string = "Heatmap for East of England"
laa_linewidth= 2
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 8
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1080
target_height = 1354
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'right'
date_font_size = 80
title_font_size = 60
laa_fontsize = 20
mask_colour='#122B49'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.67
overlay_positions = []
overlay_filenames=[]
background_file=''
write_pickle()
#South East England
short_name="southeast"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
target_places = []
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=1
frame_margins = [400000,640000,80000,240000]
label_x=637000
label_y=89000
title_x=630000
title_y=232000
plot_msoa_boundaries=True
plot_wales=False
plot_scotland=False
plot_towns=True
plot_laa = True
title_string = "Heatmap for South East England"
laa_linewidth= 2
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 8
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1616
target_height = 1080
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'right'
date_font_size = 80
title_font_size = 60
laa_fontsize = 16
mask_colour='#122B49'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.5
overlay_positions = []
overlay_filenames=[]
background_file=''
write_pickle()
#Greater London Area
short_name="london"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
target_places = []
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=1
frame_margins = [500000,560000,160000,200000]
label_x=502000
label_y=192000
title_x=502000
title_y=195000
plot_msoa_boundaries=True
plot_wales=False
plot_scotland=False
plot_towns=True
plot_laa = True
title_string = "Heatmap for London"
laa_linewidth= 2
standalone_plot = True
post_process = True
resize_output = False
heat_lim = 8
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1616
target_height = 1080
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'left'
date_font_size = 80
title_font_size = 60
laa_fontsize = 20
mask_colour='#122B49'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.4
overlay_positions = []
overlay_filenames=[]
background_file=''
write_pickle()
#Midlands
short_name="midlands"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
target_places = []
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=1
frame_margins = [315000,560000,210000,412000]
label_x=317000
label_y=392000
title_x=317000
title_y=402000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=False
plot_towns=True
plot_laa = True
title_string = "Heatmap for Midlands"
laa_linewidth= 2
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 8
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1308
target_height = 1080
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'left'
date_font_size = 80
title_font_size = 60
laa_fontsize = 22
mask_colour='#DDDDDD'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.3
overlay_positions = []
overlay_filenames=[]
background_file=''
write_pickle()
#Yorkshire+Humber Region
short_name="yorkshire"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
target_places = []
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=1
frame_margins = [360000,560000,390000,525000]
label_x=556000
label_y=505000
title_x=556000
title_y=515000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=False
plot_towns=True
plot_laa = True
title_string = "Heatmap for Yorkshire and Humber"
laa_linewidth= 2
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 8
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1594
target_height = 1080
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'right'
date_font_size = 80
title_font_size = 40
laa_fontsize = 20
mask_colour='#DDDDDD'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.4
overlay_positions = []
# Fix: was misspelled 'overlay_filename', which created a stray global and
# left the real overlay_filenames variable carrying the previous section's value.
overlay_filenames=[]
background_file=''
write_pickle()
#South West England
short_name="southwest"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
target_places = []
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=1
frame_margins = [133000,465000,10600,254000]
label_x=143000
label_y=165000
title_x=298000
title_y=42000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=False
plot_towns=True
plot_laa = True
title_string = "Heatmap for South West England"
laa_linewidth= 1.2
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 6
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1474
target_height = 1080
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'left'
date_font_size = 80
title_font_size = 60
laa_fontsize = 20
mask_colour='#122B49'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.5
overlay_positions = []
overlay_filenames=[]
background_file=''
write_pickle()
#North East England
short_name="northeast"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
target_places = []
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=1
frame_margins = [354000,480000,500000,660000]
label_x=477000
label_y=640000
title_x=477000
title_y=650000
plot_msoa_boundaries=True
plot_wales=False
plot_scotland=True
plot_towns=True
plot_laa = True
title_string = "Heatmap for North East England"
laa_linewidth= 2
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1080
target_height = 1370
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'right'
date_font_size = 80
title_font_size = 50
laa_fontsize = 22
mask_colour='#DDDDDD'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.3
overlay_filenames=[]
overlay_positions = []
background_file=''
write_pickle()
#North West England
short_name="northwest"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
target_places = []
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
colour_map='YlOrRd'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=1
frame_margins = [285000,415000,350000,585000]
label_x=288000
label_y=565000
title_x=288000
title_y=575000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=True
plot_laa = True
title_string = "Heatmap for North West England"
laa_linewidth= 2
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1068
target_height = 1920
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'left'
date_font_size = 60
title_font_size = 40
laa_fontsize = 20
mask_colour='#DDDDDD'
add_footer = True
restrict_laa_to_targets = False
f_scale = 3.1
overlay_filenames=[]
overlay_positions = []
background_file=''
write_pickle()
#North Yorkshire
short_name="northyorkshire"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
frame_margins = [360000,520000,412000,520000]
target_places = ['Craven','Harrogate','Richmondshire','Hambleton','Ryedale','Scarborough','York','Selby']
colour_map='Wistia'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=1
label_x=518000
label_y=514500
title_x=300000
title_y=50000
plot_msoa_boundaries=True
plot_wales=False
plot_scotland=False
plot_towns=True
plot_laa = True
title_string = ""
laa_linewidth= 2
# Fix: resize_output was assigned True twice in this section; once is enough.
resize_output = True
standalone_plot = True
post_process = True
heat_lim = 8
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = False
target_width = 1594
target_height = 1080
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'right'
date_font_size = 60
title_font_size = 60
laa_fontsize=40
mask_colour='#122B49'
add_footer = False
restrict_laa_to_targets = True
f_scale = 2.5
overlay_filenames=['graphics/overlay-nyorks.png']
overlay_positions = [[0,0]]
background_file=''
write_pickle()
#North Yorkshire (LSOA variant)
short_name="nyorks-lsoa"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
footer_message = "Based on LTLA, MSOA and LSOA data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
frame_margins = [360000,520000,412000,520000]
target_places = ['Craven','Harrogate','Richmondshire','Hambleton','Ryedale','Scarborough','York','Selby']
# Fix: removed dead assignments — each colour map was set twice in a row
# ('BuGn'/'Summer'/'Wistia' were immediately overwritten by the values below).
colour_map='Oranges'
msoa_colour_map='Reds'
lsoa_colour_map='autumn'
msoa_alpha=0.2
lsoa_alpha=0.15
label_x=518000
label_y=514500
title_x=300000
title_y=50000
plot_msoa_boundaries=True
plot_wales=False
plot_scotland=False
plot_towns=True
plot_laa = True
title_string = ""
laa_linewidth= 2
# Fix: resize_output was assigned True twice in this section.
resize_output = True
standalone_plot = True
post_process = True
heat_lim = 8
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = False
target_width = 1594
target_height = 1080
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = True
plot_msoa_data = True
plot_lsoa_data = True
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 60
title_font_size = 60
laa_fontsize=40
mask_colour='#122B49'
add_footer = False
restrict_laa_to_targets = True
f_scale = 2.5
overlay_filenames=['graphics/overlay-nyorks.png']
# NOTE(review): add_overlay is True and overlay_filenames has one entry but
# overlay_positions is empty — confirm the consumer tolerates the mismatch
# (the sibling 'northyorkshire' section uses [[0,0]] here).
overlay_positions = []
background_file=''
write_pickle()
short_name="doubling-kent"
footer_message = ""
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [1,2,4,8,16,32,64,128,256,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [520000,640000,100000,190000]
label_x=637000
label_y=178000
title_x=637000
title_y=185000
plot_msoa_boundaries=False
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Covid-19 Case Rate in Kent Area"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = True
target_width = 1600
target_height = 1200
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 76
title_font_size = 56
laa_fontsize = 22
mask_colour='#EEEEEEAA'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.7
overlay_filenames = ['doubling-key.png']
overlay_positions = [[1150,828]]
background_file = "ltla-bg.png"
write_pickle()
short_name="doubling-surrey"
footer_message = ""
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [1,2,4,8,16,32,64,128,256,10000]
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [410000,530000,100000,190000]
label_x=527000
label_y=178000
title_x=527000
title_y=185000
plot_msoa_boundaries=False
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Covid-19 Case Rate in Hampshire+Surrey Area"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = True
target_width = 1600
target_height = 1200
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 76
title_font_size = 40
laa_fontsize = 22
mask_colour='#EEEEEEAA'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.7
overlay_filenames = ['doubling-key.png']
overlay_positions = [[1150,828]]
background_file = "ltla-bg.png"
write_pickle()
#North Yorkshire (binned LTLA variant)
short_name="nyorks-bin"
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [1,2,4,8,16,32,64,128,256,10000]
footer_message = "Based on LTLA and MSOA data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
frame_margins = [360000,520000,412000,520000]
target_places = ['Craven','Harrogate','Richmondshire','Hambleton','Ryedale','Scarborough','York','Selby']
colour_map='jet'
# Fix: removed dead assignments ('Summer'/'Wistia' maps were immediately
# overwritten by the 'autumn' values below).
msoa_colour_map='autumn'
lsoa_colour_map='autumn'
msoa_alpha=0.01
lsoa_alpha=0.15
label_x=518000
label_y=514500
title_x=300000
title_y=50000
plot_msoa_boundaries=True
plot_wales=False
plot_scotland=False
plot_towns=True
plot_laa = True
title_string = ""
laa_linewidth= 2
# Fix: resize_output was assigned True twice in this section.
resize_output = True
standalone_plot = True
post_process = True
heat_lim = 8
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = False
target_width = 1594
target_height = 1080
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = True
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 60
title_font_size = 60
laa_fontsize=40
mask_colour='#122B49'
add_footer = False
restrict_laa_to_targets = True
f_scale = 2.5
overlay_filenames=['graphics/overlay-nyorks.png','doubling-msoa-key.png']
overlay_positions = [[0,0],[1140,640]]
background_file=''
write_pickle()
#Rate of change plot for comparison with SAGE recommendation
short_name="relative19"
footer_message = "Comparison of case rate on 09/10/2020 with 20/09/2020. Based on LTLA case data from coronavirus.data.gov.uk."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = True
relative_days = 14
target_places = []
colour_map='bwr'
msoa_colour_map='winter'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Change in C-19 Case Rate Since SAGE Recommended Circuit-Breaker"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 10
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 70
title_font_size = 30
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.6
overlay_positions = [[32,340]]
overlay_filenames = ['relative_key.png']
background_file=''
write_pickle()
#Risk Weighted (unweighted baseline plot)
short_name="age-risk-unweighted"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
#footer_message = "Based on comparison of LTLA age distribution to national average weighted with C19 fatality statistics. Data from ONS and PHE."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
# Fix: removed dead colour_map='PuRd' that was immediately overwritten by 'jet'.
colour_map='jet'
msoa_colour_map='winter'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Unweighted Heatmap"
#title_string = "Weighted risk of C19 death in LTLA due to age demographics"
laa_linewidth= 0.6
sqrt_rates = True
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 320
ltla_vmax = 20
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = True
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 70
title_font_size = 44
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.5
overlay_positions = [[32,390]]
overlay_filenames = ['sqr-key.png']
background_file=''
write_pickle()
#Risk Weighted
short_name="age-risk"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
#footer_message = "Based on comparison of LTLA age distribution to national average weighted with C19 fatality statistics. Data from ONS and PHE."
plot_classified_ltla = False
plot_risk_weighted_ltla = True
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
# Fix: removed dead colour_map='PuRd' that was immediately overwritten by 'jet'.
colour_map='jet'
msoa_colour_map='winter'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Weighted Age-Risk Heatmap"
#title_string = "Weighted risk of C19 death in LTLA due to age demographics"
laa_linewidth= 0.6
sqrt_rates = True
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 320
ltla_vmax = 20
transparent = False
add_date = True
add_background = False
# NOTE(review): add_overlay is True but both overlay lists below are empty —
# confirm the consumer treats this as "no overlay".
add_overlay = True
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 70
title_font_size = 44
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.5
overlay_positions = []
overlay_filenames = []
background_file=''
write_pickle()
#Risk Weighted
short_name="age-risk-unweighted-bin"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
#footer_message = "Based on comparison of LTLA age distribution to national average weighted with C19 fatality statistics. Data from ONS and PHE."
plot_classified_ltla = True
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [0.1,5,10,15,30,45,90,180,360,10000]
sqrt_rates = False
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='winter'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Unweighted Heatmap"
#title_string = "Weighted risk of C19 death in LTLA due to age demographics"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 320
ltla_vmax = 400
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = True
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 70
title_font_size = 44
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.5
overlay_positions = [[32,390]]
overlay_filenames = ['risk-factor-key.png']
background_file=''
write_pickle()
#Risk Weighted
short_name="age-risk-bin"
footer_message = "Based on LTLA case data from coronavirus.data.gov.uk."
#footer_message = "Based on comparison of LTLA age distribution to national average weighted with C19 fatality statistics. Data from ONS and PHE."
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = True
ltla_classifier_mode = 'manual'
ltla_classifier_bins = [0.1,5,10,15,30,45,90,180,360,10000]
sqrt_rates = False
plot_ranks = False
plot_relative = False
relative_days = 7
target_places = []
colour_map='jet'
msoa_colour_map='winter'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=0.4
frame_margins = [133000,658000,10600,655000]
label_x=654000
label_y=576000
title_x=654000
title_y=605000
plot_msoa_boundaries=True
plot_wales=True
plot_scotland=True
plot_towns=False
plot_laa = True
title_string = "Weighted Age-Risk Heatmap"
#title_string = "Weighted risk of C19 death in LTLA due to age demographics"
laa_linewidth= 0.6
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 320
ltla_vmax = 400
transparent = False
add_date = True
add_background = False
add_overlay = True
add_title = True
target_width = 1080
target_height = 1324
plot_laa_names=False
plot_laa_values=False
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = False
text_align_mode = 'right'
date_font_size = 70
title_font_size = 44
laa_fontsize = 14
mask_colour='#EEEEEE'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.5
overlay_positions = []
overlay_filenames = []
background_file=''
write_pickle()
#Norfolk
short_name="norfolk"
plot_classified_ltla = False
plot_risk_weighted_ltla = False
plot_risk_weighted_ltla_binned = False
ltla_classifier_mode = None
ltla_classifier_bins = None
target_places = []
footer_message = "Based on LTLA and MSOA case data from coronavirus.data.gov.uk."
plot_ranks = False
plot_relative = False
colour_map='Purples'
msoa_colour_map='YlOrRd'
lsoa_colour_map='YlOrRd'
msoa_alpha=1
lsoa_alpha=1
frame_margins = [559000,659000,252000,352000]
label_x=655000
label_y=342000
title_x=655000
title_y=347000
plot_msoa_boundaries=True
plot_wales=False
plot_scotland=False
plot_towns=True
plot_laa = True
title_string = "C-19 Case Rate Heatmap for Norfolk Area"
laa_linewidth= 2
standalone_plot = True
post_process = True
resize_output = True
heat_lim = 6
transparent = False
add_date = True
add_background = False
add_overlay = False
add_title = True
target_width = 1080
target_height = 1080
plot_laa_names=True
plot_laa_values=True
plot_ltla_data = False
plot_msoa_data = False
plot_lsoa_data = False
plot_combined_data = True
text_align_mode = 'right'
date_font_size = 80
title_font_size = 60
laa_fontsize = 32
mask_colour='#122B49'
add_footer = True
restrict_laa_to_targets = False
f_scale = 2.3
overlay_positions = []
overlay_filenames=[]
background_file=''
write_pickle()
| 23.230192
| 398
| 0.808692
| 8,927
| 55,706
| 4.692058
| 0.048281
| 0.070692
| 0.033209
| 0.038199
| 0.926515
| 0.917443
| 0.912835
| 0.90615
| 0.895383
| 0.888961
| 0
| 0.067941
| 0.111424
| 55,706
| 2,398
| 399
| 23.230192
| 0.778258
| 0.039816
| 0
| 0.917337
| 1
| 0.001787
| 0.104088
| 0.019747
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000447
| false
| 0
| 0.000447
| 0
| 0.000894
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2e56793063819897092b9282b387a3271b40601a
| 26,037
|
py
|
Python
|
calc.py
|
Aakash007-ai/Calce_and_Currency_converter
|
27ca827d2e1aecae60b8355c1eeec087cae501cd
|
[
"CC0-1.0"
] | 1
|
2020-11-17T12:21:48.000Z
|
2020-11-17T12:21:48.000Z
|
calc.py
|
Aakash007-ai/Calce_and_Currency_converter
|
27ca827d2e1aecae60b8355c1eeec087cae501cd
|
[
"CC0-1.0"
] | 1
|
2020-11-16T16:51:44.000Z
|
2020-11-16T16:51:44.000Z
|
calc.py
|
Aakash007-ai/Calce_and_Currency_converter
|
27ca827d2e1aecae60b8355c1eeec087cae501cd
|
[
"CC0-1.0"
] | 1
|
2020-11-17T04:31:33.000Z
|
2020-11-17T04:31:33.000Z
|
from tkinter import *;
from tkinter import messagebox;
import numpy as np
import math
def actionauthor():
    """Show an informational dialog identifying the project.

    Fix: messagebox.showinfo(title, message) was called with only a
    title, so the dialog body appeared empty; the text is now passed
    as the message.
    """
    messagebox.showinfo("About", "IT Workshop Project")
#Check whether the input string is a number or not
def is_number(s):
    """Return True if *s* is a plain decimal number string.

    Accepts optional surrounding whitespace, an optional leading '+'
    or '-' sign, and at most one decimal point — e.g. "123", "0.123",
    ".123", "-0.123", "123.456".  Rejects empty strings, bare signs,
    and scientific notation, so every accepted string is safe to pass
    to casting() (i.e. to int() or float()).

    Fix: the original indexed s[1] and s[2] without length checks and
    raised IndexError on inputs such as "-", "+" or " ".
    """
    s = s.strip()
    # Drop a single leading sign; what remains must be pure digits
    # with at most one '.'.
    if s and s[0] in '+-':
        s = s[1:]
    if not s:
        return False
    return s.replace('.', '', 1).isdigit()
def casting(num):
    """Convert a numeric string to a Python number.

    Returns a float when the string contains a decimal point,
    otherwise an int.
    """
    return float(num) if '.' in num else int(num)
#Plus sign function
def actionPlus():
    """Add the two entry values and display the sum.

    Reads both operands from the Numberentry widgets; shows an error
    dialog if either is not a valid decimal number.
    """
    # Clear the operator and result widgets, then show '+' with a
    # provisional result of '0' while the input is validated.
    Showtemplabel.delete(0, END);
    Showlabel.delete(0, END)
    Showtemplabel.config(fg='red', bg='#9ed8ee')
    Showtemplabel.insert(0, '+');
    Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
    ans = "0";
    Showlabel.insert(0, ans);
    Showlabel.place(relx=0.5, rely=0.6, anchor='center')
    num1 = Numberentry1.get();
    num2 = Numberentry2.get();
    # NOTE(review): is_number(' ') raises IndexError before the
    # num1 != ' ' guards below are ever evaluated — confirm inputs.
    if(is_number(num1) == True and is_number(num2) == True and num1 != ' ' and num2 != ' '):
        num1 = casting(num1);
        num2 = casting(num2);
        ans = str(num1 + num2);
        # Redraw the widgets with the real answer.
        Showtemplabel.delete(0, END);
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='red', bg='#9ed8ee')
        Showtemplabel.insert(0, '+');
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, ans);
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')
    else:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
#Minus sign function
def actionMinus():
    """Subtract the second entry value from the first and display it.

    Reads both operands from the Numberentry widgets; shows an error
    dialog if either is not a valid decimal number.
    """
    # Clear the operator and result widgets, then show '-' with a
    # provisional result of '0' while the input is validated.
    Showtemplabel.delete(0, END);
    Showlabel.delete(0, END)
    Showtemplabel.config(fg='green', bg='#ece7e2')
    Showtemplabel.insert(0, '-');
    Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
    ans = "0";
    Showlabel.insert(0, ans);
    Showlabel.place(relx=0.5, rely=0.6, anchor='center')
    num1 = Numberentry1.get();
    num2 = Numberentry2.get();
    if(is_number(num1)==True and is_number(num2)==True):
        num1 = casting(num1);
        num2 = casting(num2);
        ans = str(num1 - num2);
        # Redraw the widgets with the real answer.
        Showtemplabel.delete(0, END);
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='green', bg='#ece7e2')
        Showtemplabel.insert(0, '-');
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, ans);
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')
    else:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
#Multiplication sign function
def actionMul():
    """Multiply the two entry values and display the product.

    Reads both operands from the Numberentry widgets; shows an error
    dialog if either is not a valid decimal number.
    """
    # Clear the operator and result widgets, then show 'x' with a
    # provisional result of '0' while the input is validated.
    Showtemplabel.delete(0, END);
    Showlabel.delete(0, END)
    Showtemplabel.config(fg='blue', bg='#cacba9')
    Showtemplabel.insert(0, 'x');
    Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
    ans = "0"
    Showlabel.insert(0, ans);
    Showlabel.place(relx=0.5, rely=0.6, anchor='center')
    num1 = Numberentry1.get();
    num2 = Numberentry2.get();
    if(is_number(num1)==True and is_number(num2)==True):
        num1 = casting(num1);
        num2 = casting(num2);
        ans = str(num1 * num2);
        # Redraw the widgets with the real answer.
        Showtemplabel.delete(0, END);
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='blue', bg='#cacba9')
        Showtemplabel.insert(0, 'x');
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, ans);
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')
    else:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
#Division sign function
def actionDiv():
    """Handle the '/' button: show num1 / num2.

    Shows an error dialog for non-numeric input and for division by zero
    (the original raised an uncaught ZeroDivisionError in the Tk callback).
    """
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, '/')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the inputs are validated
    first = Numberentry1.get()
    second = Numberentry2.get()
    if not (is_number(first) and is_number(second)):
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    divisor = casting(second)
    if divisor == 0:
        # BUG FIX: guard against division by zero instead of crashing.
        messagebox.showerror("Error", "Division by zero is not allowed")
        return
    _render(str(casting(first) / divisor))
#Percentage sign function
def action_percent():
    """Handle the '%' button: show (num1 / 100) * num2, or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, '%')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the inputs are validated
    first = Numberentry1.get()
    second = Numberentry2.get()
    if is_number(first) != True or is_number(second) != True:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    _render(str((casting(first) / 100) * casting(second)))
#Trigonometric Functions
#Cosine
def action_cos():
    """Handle the 'cos' button: show np.cos(num1), or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, 'cos')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if is_number(raw) != True:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    _render(str(np.cos(casting(raw))))
# Sine Function
def action_sin():
    """Handle the 'sin' button: show np.sin(num1), or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, 'sin')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if is_number(raw) != True:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    _render(str(np.sin(casting(raw))))
# Tangent Function
def action_tan():
    """Handle the 'tan' button: show np.tan(num1), or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, 'tan')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if is_number(raw) != True:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    _render(str(np.tan(casting(raw))))
# Inverse Trigonometric Functions
# Arc cosine Function
def action_acos():
    """Handle the 'acos' button: show the arc cosine of num1, or an error dialog."""
    def _render(symbol, result):
        # Redraw the operator badge (`symbol`) and the answer field (`result`).
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, symbol)
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render('acos', "0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if not is_number(raw):
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    # BUG FIX: `np.acos` only exists as an alias in NumPy >= 2.0; the spelling
    # that works on every NumPy version is `np.arccos`.
    _render('cos_-1', str(np.arccos(casting(raw))))
# Arc sine function
def action_asin():
    """Handle the 'asin' button: show the arc sine of num1, or an error dialog."""
    def _render(symbol, result):
        # Redraw the operator badge (`symbol`) and the answer field (`result`).
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, symbol)
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render('asin', "0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if not is_number(raw):
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    # BUG FIX: `np.asin` only exists as an alias in NumPy >= 2.0; use the
    # portable `np.arcsin` instead.
    _render('sin_-1', str(np.arcsin(casting(raw))))
# Arc tangent function
def action_atan():
    """Handle the 'atan' button: show the arc tangent of num1, or an error dialog."""
    def _render(symbol, result):
        # Redraw the operator badge (`symbol`) and the answer field (`result`).
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, symbol)
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        # BUG FIX: the placeholder pass used anchor='atan', which is not a
        # valid Tk anchor and raises TclError; every pass now uses 'center'.
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render('atan', "0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if not is_number(raw):
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    # BUG FIX: `np.atan` only exists as an alias in NumPy >= 2.0; use the
    # portable `np.arctan` instead.
    _render('tan_-1', str(np.arctan(casting(raw))))
# Logarithmic Function
def action_ln():
    """Handle the 'ln' button: show the natural log of num1, or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, 'log_e')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if is_number(raw) != True:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    _render(str(np.log(casting(raw))))
# Log Function(log base 10)
def action_log():
    """Handle the 'log' button: show the base-10 log of num1, or an error dialog."""
    def _render(symbol, result):
        # Redraw the operator badge (`symbol`) and the answer field (`result`).
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, symbol)
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render('log', "0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if not is_number(raw):
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    # math.log10 replaces the hand-rolled math.log(x)/math.log(10); it is the
    # dedicated, more accurate stdlib routine for base-10 logarithms.
    _render('log_10', str(math.log10(casting(raw))))
# Power Function
def action_power():
    """Handle the 'power' button: show num1 ** num2, or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, 'power')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the inputs are validated
    first = Numberentry1.get()
    second = Numberentry2.get()
    if is_number(first) != True or is_number(second) != True:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    _render(str(casting(first) ** casting(second)))
# Square Function
def action_sq():
    """Handle the '^2' button: show num1 squared, or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, 'square')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if is_number(raw) != True:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    _render(str(casting(raw) ** 2))
# Square-root Function
def action_sq_root():
    """Handle the '^1/2' button: show num1 ** 0.5, or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, 'sq_root')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if is_number(raw) != True:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    # NOTE(review): for negative input, `x ** 0.5` yields a complex number in
    # Python 3, so a complex repr is displayed -- confirm this is intended.
    _render(str(casting(raw) ** 0.5))
# Exponential Function
def action_exp():
    """Handle the 'exp' button: show e ** num1, or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, 'anti_ln')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if is_number(raw) != True:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    _render(str(math.exp(casting(raw))))
# Antilog Function(10 raised to the power entered number)
def action_antilog():
    """Handle the 'antilog' button: show 10 ** num1, or an error dialog."""
    def _render(symbol, result):
        # Redraw the operator badge (`symbol`) and the answer field (`result`).
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        Showtemplabel.insert(0, symbol)
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render('antilog', "0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if is_number(raw) != True:
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    # The original badge capitalization differs between the two passes
    # ('antilog' then 'Antilog'); preserved byte-for-byte.
    _render('Antilog', str(10 ** casting(raw)))
# Pie function
def action_pi():
    """Handle the 'pi' button: display the constant pi (takes no input)."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        # BUG FIX: the badge read 'Division' (copy-paste slip from actionDiv);
        # label it 'pi' to match the button that triggers this handler.
        Showtemplabel.insert(0, 'pi')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder, immediately replaced
    _render(str(np.pi))
# Floor Function
def action_floor():
    # BUG FIX: the original read `def action_floor:` (missing parentheses),
    # which is a SyntaxError and prevented the whole file from loading.
    """Handle the 'floor' button: show math.floor(num1), or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        # BUG FIX: the badge read 'antilog' / 'Division' (copy-paste slips);
        # label it 'floor' to match the button that triggers this handler.
        Showtemplabel.insert(0, 'floor')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if not is_number(raw):
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    _render(str(math.floor(casting(raw))))
#Ceil Function
def action_ceil():
    """Handle the 'ceil' button: show math.ceil(num1), or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        # BUG FIX: the badge read 'antilog' / 'Division' (copy-paste slips);
        # label it 'ceil' to match the button that triggers this handler.
        Showtemplabel.insert(0, 'ceil')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if not is_number(raw):
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    _render(str(math.ceil(casting(raw))))
#Factorial Function
def factorial(n):
    """Recursive factorial used by the 'fact' button.

    Returns n! for non-negative integer values (integer-valued floats such
    as 5.0 are accepted, mirroring casting()'s output). Returns the string
    "Error" for negative or non-integer input, matching the calculator's
    display convention.
    """
    if n < 0 or n != int(n):
        # BUG FIX: the original recursed on non-integers (e.g. 2.5 -> 1.5 ->
        # 0.5), bottomed out at the string "Error", and then crashed with
        # TypeError on `n * "Error"`. Reject such input up front.
        return "Error"
    if n == 0:
        return 1
    return n * factorial(n - 1)
def action_fact():
    """Handle the 'fact' button: show factorial(num1), or an error dialog."""
    def _render(result):
        # Redraw the operator badge and the answer field with `result`.
        Showtemplabel.delete(0, END)
        Showlabel.delete(0, END)
        Showtemplabel.config(fg='yellow', bg='#8dad96')
        # BUG FIX: the badge read 'antilog' / 'Division' (copy-paste slips);
        # label it 'fact' to match the button that triggers this handler.
        Showtemplabel.insert(0, 'fact')
        Showtemplabel.place(relx=0.5, rely=0.5, anchor='center')
        Showlabel.insert(0, result)
        Showlabel.place(relx=0.5, rely=0.6, anchor='center')

    _render("0")  # placeholder until the input is validated
    raw = Numberentry1.get()
    if not is_number(raw):
        messagebox.showerror("Error", "Enter a Valid number\ne.g. 123, 0.123, .123, -0.123, 123.456")
        return
    # factorial() itself returns "Error" for negative/non-integer input,
    # which is displayed verbatim.
    _render(str(factorial(casting(raw))))
# Main Window: build the calculator UI and enter the Tk event loop.
root = Tk()
root.title('Python Calculator')
root.geometry('380x300+200+250')

Titlelabel = Label(root, fg='green', font='none 10 bold underline',
                   text='Python Calculator', compound=CENTER)
Titlelabel.place(relx=0.5, rely=0.1, anchor='center')

# Result widgets (placed on demand by the action handlers) and the two inputs.
Showlabel = Entry(root)
Showtemplabel = Entry(root)
Numberentry1 = Entry(root)
Numberentry2 = Entry(root)
Numberentry1.place(relx=0.5, rely=0.3, anchor='center')
Numberentry2.place(relx=0.5, rely=0.4, anchor='center')

# Operation Buttons(+,-,x,/,%)
plusbutton = Button(root, text="+", width=5, command=actionPlus)
plusbutton.place(relx=0.1, rely=0.7)
minusbutton = Button(root, text="-", width=5, command=actionMinus)
minusbutton.place(relx=0.3, rely=0.7)
mulbutton = Button(root, text="*", width=5, command=actionMul)
mulbutton.place(relx=0.5, rely=0.7)
divbutton = Button(root, text="/", width=5, command=actionDiv)
divbutton.place(relx=0.7, rely=0.7)
authorbutton = Button(root, text='Author', width=6, command=actionauthor)
authorbutton.place(relx=0.5, rely=0.95, anchor='center')
percent_button = Button(root, text="%", width=5, command=action_percent)
percent_button.place(relx=0.9, rely=0.7)

# NOTE(review): every button below is placed at relx > 1.0, i.e. outside the
# 380x300 non-resizable window, so it cannot be seen or clicked -- confirm
# whether these placements are intentional.

# Buttons for Trigonometric Functions(cos,sin,tan)
cos_button = Button(root, text="cos", width=5, command=action_cos)
cos_button.place(relx=1.1, rely=0.7)
sin_button = Button(root, text="sin", width=5, command=action_sin)
sin_button.place(relx=1.3, rely=0.7)
tan_button = Button(root, text="tan", width=5, command=action_tan)
tan_button.place(relx=1.5, rely=0.7)

# Buttons for Inverse Trigonometric Functions(cos,sin,tan)
acos_button = Button(root, text="acos", width=5, command=action_acos)
acos_button.place(relx=2.1, rely=0.7)
asin_button = Button(root, text="asin", width=5, command=action_asin)
asin_button.place(relx=2.3, rely=0.7)
atan_button = Button(root, text="atan", width=5, command=action_atan)
atan_button.place(relx=2.5, rely=0.7)

# Logarithmic Functions
ln_button = Button(root, text="ln", width=5, command=action_ln)
ln_button.place(relx=2.7, rely=0.7)
log_button = Button(root, text="log", width=5, command=action_log)
log_button.place(relx=2.9, rely=0.7)

# Power Buttons
pow_button = Button(root, text="power", width=5, command=action_power)
pow_button.place(relx=3.1, rely=0.7)
sq_button = Button(root, text="^2", width=5, command=action_sq)
sq_button.place(relx=3.3, rely=0.7)
sq_root_button = Button(root, text="^1/2", width=5, command=action_sq_root)
sq_root_button.place(relx=3.5, rely=0.7)

# Exponential Button
exp_button = Button(root, text="exp", width=5, command=action_exp)
exp_button.place(relx=3.7, rely=0.7)

# Antilog Button
# BUG FIX: this button was wired to action_exp (copy-paste slip); it now
# triggers action_antilog as its label promises.
antilog_button = Button(root, text="antilog", width=5, command=action_antilog)
antilog_button.place(relx=3.9, rely=0.7)

# Pi button
pi_button = Button(root, text="pi", width=5, command=action_pi)
pi_button.place(relx=4.1, rely=0.7)

# Floor and Ceil Function
floor_button = Button(root, text="floor", width=5, command=action_floor)
floor_button.place(relx=4.3, rely=0.7)
ceil_button = Button(root, text="ceil", width=5, command=action_ceil)
ceil_button.place(relx=4.5, rely=0.7)

# Factorial Button
fact_button = Button(root, text="fact", width=5, command=action_fact)
fact_button.place(relx=4.7, rely=0.7)

root.resizable(False, False)
root.mainloop()
| 32.184178
| 120
| 0.60318
| 3,478
| 26,037
| 4.482749
| 0.049166
| 0.017703
| 0.062215
| 0.065615
| 0.799179
| 0.787762
| 0.768456
| 0.768456
| 0.768456
| 0.768456
| 0
| 0.071785
| 0.229558
| 26,037
| 808
| 121
| 32.22401
| 0.705434
| 0.032147
| 0
| 0.758562
| 0
| 0.035959
| 0.11841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.006849
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2e58e1ca9f81353b919c5210c9bbf66a4e588600
| 70
|
py
|
Python
|
analyze/__init__.py
|
twooopark/analysis_fb
|
4e23d691b4628ecd99d8eb5c88a6ac7d94e84bf8
|
[
"MIT"
] | null | null | null |
analyze/__init__.py
|
twooopark/analysis_fb
|
4e23d691b4628ecd99d8eb5c88a6ac7d94e84bf8
|
[
"MIT"
] | null | null | null |
analyze/__init__.py
|
twooopark/analysis_fb
|
4e23d691b4628ecd99d8eb5c88a6ac7d94e84bf8
|
[
"MIT"
] | null | null | null |
from .analyzer import count_wordfreq
from .analyzer import json_to_str
| 35
| 36
| 0.871429
| 11
| 70
| 5.272727
| 0.727273
| 0.413793
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 70
| 2
| 37
| 35
| 0.920635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2e6dbde3cdb55aeb6606f87c9aa9ce52ffb61976
| 1,443
|
py
|
Python
|
ckanext/accesscontrol/logic/auth.py
|
SAEONData/ckanext-openidconnect
|
8bc21cddd2a2445eb6ea117e8fb02a06b3d9d5ed
|
[
"MIT"
] | null | null | null |
ckanext/accesscontrol/logic/auth.py
|
SAEONData/ckanext-openidconnect
|
8bc21cddd2a2445eb6ea117e8fb02a06b3d9d5ed
|
[
"MIT"
] | 3
|
2018-10-17T15:33:05.000Z
|
2018-11-01T13:13:47.000Z
|
ckanext/accesscontrol/logic/auth.py
|
SAEONData/ckanext-accesscontrol
|
8bc21cddd2a2445eb6ea117e8fb02a06b3d9d5ed
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
def user_privilege_check(context, data_dict):
    """Auth check for user_privilege_check: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def role_create(context, data_dict):
    """Auth check for role_create: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def role_update(context, data_dict):
    """Auth check for role_update: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def role_delete(context, data_dict):
    """Auth check for role_delete: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def role_show(context, data_dict):
    """Auth check for role_show: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def role_list(context, data_dict):
    """Auth check for role_list: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def role_permission_grant(context, data_dict):
    """Auth check for role_permission_grant: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def role_permission_revoke(context, data_dict):
    """Auth check for role_permission_revoke: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def role_permission_list(context, data_dict):
    """Auth check for role_permission_list: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def user_role_assign(context, data_dict):
    """Auth check for user_role_assign: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def user_role_unassign(context, data_dict):
    """Auth check for user_role_unassign: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def permission_list(context, data_dict):
    """Auth check for permission_list: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def permission_define(context, data_dict):
    """Auth check for permission_define: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def permission_undefine(context, data_dict):
    """Auth check for permission_undefine: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
def permission_delete_all(context, data_dict):
    """Auth check for permission_delete_all: always deny (sysadmins only)."""
    # sysadmins only
    return dict(success=False)
| 18.74026
| 47
| 0.694387
| 175
| 1,443
| 5.514286
| 0.165714
| 0.170984
| 0.233161
| 0.373057
| 0.893264
| 0.893264
| 0.893264
| 0.893264
| 0.845596
| 0.845596
| 0
| 0.000866
| 0.199584
| 1,443
| 76
| 48
| 18.986842
| 0.834632
| 0.16632
| 0
| 0.5
| 0
| 0
| 0.088533
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 11
|
5cfc9736b6b0c6f08fd65a7e764fcd9a09566b03
| 56,322
|
py
|
Python
|
web/transiq/restapi/tests/tests_booking_status_chain.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
web/transiq/restapi/tests/tests_booking_status_chain.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | 14
|
2020-06-05T23:06:45.000Z
|
2022-03-12T00:00:18.000Z
|
web/transiq/restapi/tests/tests_booking_status_chain.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
import json
from django.contrib.auth.models import User
from django.urls import reverse
from model_mommy import mommy
from rest_framework import status
from rest_framework.test import APITestCase
from authentication.models import Profile
from restapi.models import BookingStatuses
class BookingStatusChainTests(APITestCase):
    """Shared fixture: an authenticated user, booking statuses, and one
    pre-created booking status chain whose id is saved for the subclasses."""

    def setUp(self):
        self.login_url = reverse('login')
        self.logout_url = reverse('logout')
        self.bookingstatuschainlist_url = reverse('booking_status_chain_list/')
        self.bookingstatuschaincreate_url = reverse('booking_status_chain_create/')
        self.user = User.objects.create_user(username='john_doe',
                                             email='harshadasawant89@gmail.com',
                                             password='abc12345')
        Profile.objects.create(
            user=self.user,
            name='John_Doe',
            phone='9619125174',
        )
        # Log in once and keep the token for authenticated requests.
        self.login_data = self.client.post(self.login_url, {'username': 'john_doe', 'password': 'abc12345'}).content
        self.login_data = json.loads(self.login_data.decode('utf8'))
        self.token = 'Token {}'.format(self.login_data['token'])
        self.bookingstatus = mommy.make(BookingStatuses, status='lr_generated')
        self.bookingstatus_id = self.bookingstatus.id
        self.primarypreceded = mommy.make(BookingStatuses, status='loaded')
        self.primarypreceded_id = self.primarypreceded.id
        self.primarysucceeded = mommy.make(BookingStatuses, status='unloaded')
        self.primarysucceeded_id = self.primarysucceeded.id
        # BUG FIX: the two secondary ids were copy-pasted from the primary
        # objects, so the secondary statuses created above went unused; point
        # the ids at the secondary objects they were created for.
        self.secondarypreceded = mommy.make(BookingStatuses, status='loaded')
        self.secondarypreceded_id = self.secondarypreceded.id
        self.secondarysucceeded = mommy.make(BookingStatuses, status='advance_paid')
        self.secondarysucceeded_id = self.secondarysucceeded.id
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.post(self.bookingstatuschaincreate_url,
                                    json.dumps({"booking_status_id": self.bookingstatus_id,
                                                "primary_preceded_booking_status_id": self.primarypreceded_id,
                                                "primary_succeeded_booking_status_id": self.primarysucceeded_id,
                                                "secondary_preceded_booking_status_id": self.secondarypreceded_id,
                                                "secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
                                                "level": "primary"}),
                                    content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.bookingstatuschain_id = response.data['data']['id']
class BookingStatusChainCreateTests(BookingStatusChainTests):
def setUp(self):
super().setUp()
self.bookingstatus = mommy.make(BookingStatuses, status='lr_generated')
self.bookingstatus_id = self.bookingstatus.id
self.primarypreceded = mommy.make(BookingStatuses, status='loaded')
self.primarypreceded_id = self.primarypreceded.id
self.primarysucceeded = mommy.make(BookingStatuses, status='unloaded')
self.primarysucceeded_id = self.primarysucceeded.id
self.secondarypreceded = mommy.make(BookingStatuses, status='loaded')
self.secondarypreceded_id = self.primarypreceded.id
self.secondarysucceeded = mommy.make(BookingStatuses, status='advance_paid')
self.secondarysucceeded_id = self.primarysucceeded.id
"""
Test ID:TS01TD00140
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:failure
Message:wrong method
Status code:405
"""
def test_booking_status_create_405_wrong_method(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get(self.bookingstatuschaincreate_url,
{"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"},
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
"""
Test ID:TS01TD00141
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:failure
Message:no header
Status code:401
"""
def test_booking_status_create_401_no_header(self):
self.client.credentials()
response = self.client.post(self.bookingstatuschaincreate_url,
{"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"},
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")
"""
Test ID:TS01TD00142
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:failure
Message:expired header
Status code:401
"""
def test_booking_status_create_401_expired_header(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.delete(self.logout_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.post(self.bookingstatuschaincreate_url,
{"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"},
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01TD00142
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:failure
Message:wrong token
Status code:401
"""
def test_booking_status_create_401_wrong_token(self):
token = 'Token c8b0f520fa7380fe7090131a4456859b1efc7777'
self.client.credentials(HTTP_AUTHORIZATION=token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01TD00143
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:failure
Message:wrong content type
Status code:415
"""
def test_booking_status_create_415_header_with_wrong_content_type(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"}),
content_type='application/pdf')
self.assertEqual(response.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
"""
Test ID:TS01TD00144
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:failure
Message:blank body
Status code:400
"""
def test_booking_status_create_400_blank_body(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['booking_status_id'][0], "This field is required.")
self.assertEqual(response.data['level'][0], "This field is required.")
self.assertEqual(response.data['primary_preceded_booking_status_id'][0], "This field is required.")
self.assertEqual(response.data['primary_succeeded_booking_status_id'][0], "This field is required.")
self.assertEqual(response.data['secondary_preceded_booking_status_id'][0], "This field is required.")
self.assertEqual(response.data['secondary_succeeded_booking_status_id'][0], "This field is required.")
"""
Test ID:TS01TD00145
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:failure
Message:blank booking status id
Status code:400
"""
def test_booking_status_create_400_blank_booking_status_id(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": "",
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['booking_status_id'][0], "A valid integer is required.")
"""
Test ID:TS01TD00146
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:failure
Message:corrupt body
Status code:400
"""
def test_booking_status_create_400_corrupt_body(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": "jdksdjk",
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": "hjgdhgsd",
"level": "primary"}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['primary_succeeded_booking_status_id'][0], "A valid integer is required.")
self.assertEqual(response.data['secondary_succeeded_booking_status_id'][0], "A valid integer is required.")
"""
Test ID:TS01TD00147
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:failure
Message:wrong booking status id
Status code:400
"""
def test_booking_status_create_400_wrong_booking_status_id(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
{"booking_status_id": 6000,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"},
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
"""
Test ID:TS01TD00148
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:failure
Message:duplicate data
Status code:400
"""
def test_booking_status_chain_create_400_duplicate_data(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
"""
Test ID:TS01TD00149
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-create/
Status:success
Message:chain create
Status code:201
"""
def test_booking_status_chain_create_201(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['status'], "success")
self.assertEqual(response.data['msg'], "Booking Status Chain Created")
class BookingStatusChainRetrieveTests(BookingStatusChainTests):
    """
    Tests for GET /api/booking-status-chain-retrieve/<id>/.

    Covers test IDs TS01TD00160-TS01TD00164 (created by Hari, 13/12/2018).
    """

    def _get(self, chain_id):
        # Issue the retrieve GET for the given chain id.
        return self.client.get(
            "/api/booking-status-chain-retrieve/{}/".format(chain_id),
            content_type='application/json')

    def test_booking_status_chain_retrieve_405_wrong_method(self):
        """TS01TD00160: POST on the retrieve endpoint -> 405."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.post(
            "/api/booking-status-chain-retrieve/{}/".format(self.bookingstatuschain_id),
            content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_booking_status_chain_retrieve_401_no_header(self):
        """TS01TD00161: missing auth header -> 401."""
        self.client.credentials()
        response = self._get(self.bookingstatuschain_id)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")

    def test_booking_status_chain_retrieve_401_expired_header(self):
        """TS01TD00162: a token invalidated by logout -> 401 Invalid token."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.delete(self.logout_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self._get(self.bookingstatuschain_id)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response.data['detail'], "Invalid token.")

    def test_booking_status_chain_retrieve_401_wrong_token(self):
        """TS01TD00162: an unrecognised token -> 401 Invalid token."""
        self.client.credentials(
            HTTP_AUTHORIZATION='Token c8b0f520fa7380fe7090131a4456859b1efc7777')
        response = self._get(self.bookingstatuschain_id)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response.data['detail'], "Invalid token.")

    def test_booking_status_chain_retrieve_404_wrong_id(self):
        """TS01TD00163: a nonexistent chain id -> 404."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self._get(10000)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(response.data['detail'], "Not found.")

    def test_booking_status_chain_retrieve_200(self):
        """TS01TD00164: a valid chain id -> 200 with a success envelope."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self._get(self.bookingstatuschain_id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['status'], "success")
        self.assertEqual(response.data['msg'], "Booking Status Chain Retrieved")
class BookingStatusChainUpdateTests(BookingStatusChainTests):
    """
    Tests for POST /api/booking-status-chain-update/<id>/.

    Covers test IDs TS01TD00150-TS01TD00159 (created by Hari, 13/12/2018).
    Fixes in this revision: the blank-body test name had a typo
    ('updatete' -> 'update'), and the success case is documented as
    202 Accepted (the old header said 201 while the assertion checked 202).
    """

    def _url(self, chain_id):
        # Build the update endpoint URL for the given chain id.
        return "/api/booking-status-chain-update/{}/".format(chain_id)

    def _payload(self, **overrides):
        # Complete, valid update payload; keyword overrides replace fields.
        data = {
            "booking_status_id": self.bookingstatus_id,
            "primary_preceded_booking_status_id": self.primarypreceded_id,
            "primary_succeeded_booking_status_id": self.primarysucceeded_id,
            "secondary_preceded_booking_status_id": self.secondarypreceded_id,
            "secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
            "level": "primary",
        }
        data.update(overrides)
        return data

    def test_booking_status_update_405_wrong_method(self):
        """TS01TD00150: GET is not allowed on the update endpoint -> 405."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.get(self._url(self.bookingstatuschain_id),
                                   self._payload(),
                                   content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_booking_status_update_401_no_header(self):
        """TS01TD00151: missing auth header -> 401."""
        self.client.credentials()
        response = self.client.post(self._url(self.bookingstatuschain_id),
                                    self._payload(),
                                    content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")

    def test_booking_status_update_401_expired_header(self):
        """TS01TD00152: a token invalidated by logout -> 401 Invalid token."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.delete(self.logout_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self.client.post(self._url(self.bookingstatuschain_id),
                                    json.dumps(self._payload()),
                                    content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response.data['detail'], "Invalid token.")

    def test_booking_status_update_401_wrong_token(self):
        """TS01TD00152: an unrecognised token -> 401 Invalid token."""
        token = 'Token c8b0f520fa7380fe7090131a4456859b1efc7777'
        self.client.credentials(HTTP_AUTHORIZATION=token)
        response = self.client.post(self._url(self.bookingstatuschain_id),
                                    json.dumps(self._payload()),
                                    content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response.data['detail'], "Invalid token.")

    def test_booking_status_update_415_header_with_wrong_content_type(self):
        """TS01TD00153: unsupported content type (application/pdf) -> 415."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.post(self._url(self.bookingstatuschain_id),
                                    json.dumps(self._payload()),
                                    content_type='application/pdf')
        self.assertEqual(response.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)

    def test_booking_status_update_400_blank_body(self):
        """TS01TD00154: empty body -> 400 with an error on every required field.

        NOTE(review): renamed from test_booking_status_updatete_400_blank_body
        (typo in 'update'); test discovery still picks it up via the test_ prefix.
        """
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.post(self._url(self.bookingstatuschain_id),
                                    json.dumps({}),
                                    content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        for field in ('booking_status_id',
                      'level',
                      'primary_preceded_booking_status_id',
                      'primary_succeeded_booking_status_id',
                      'secondary_preceded_booking_status_id',
                      'secondary_succeeded_booking_status_id'):
            self.assertEqual(response.data[field][0], "This field is required.")

    def test_booking_status_update_400_blank_booking_status_id(self):
        """TS01TD00155: blank booking_status_id -> 400 integer error."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.post(self._url(self.bookingstatuschain_id),
                                    json.dumps(self._payload(booking_status_id="")),
                                    content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data['booking_status_id'][0], "A valid integer is required.")

    def test_booking_status_update_400_corrupt_body(self):
        """TS01TD00156: non-integer succeeded ids -> 400 integer errors."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        corrupt = self._payload(primary_succeeded_booking_status_id="jdksdjk",
                                secondary_succeeded_booking_status_id="hjgdhgsd")
        response = self.client.post(self._url(self.bookingstatuschain_id),
                                    json.dumps(corrupt),
                                    content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data['primary_succeeded_booking_status_id'][0],
                         "A valid integer is required.")
        self.assertEqual(response.data['secondary_succeeded_booking_status_id'][0],
                         "A valid integer is required.")

    def test_booking_status_update_400_wrong_booking_status_id(self):
        """TS01TD00157: an unknown booking_status_id (6000) -> 400."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        # Plain dict body (not json.dumps), matching the original request shape.
        response = self.client.post(self._url(self.bookingstatuschain_id),
                                    self._payload(booking_status_id=6000),
                                    content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_booking_status_chain_update_400_wrong_id(self):
        """TS01TD00158: a nonexistent chain id in the URL -> 400 with a
        does-not-exist status message."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.post(self._url(10000),
                                    json.dumps(self._payload()),
                                    content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data['status'], "BookingStatusChain Doesn't exists")

    def test_booking_status_chain_update_202(self):
        """TS01TD00159: a valid update -> 202 Accepted with a success envelope."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.post(self._url(self.bookingstatuschain_id),
                                    json.dumps(self._payload()),
                                    content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        self.assertEqual(response.data['status'], "success")
        self.assertEqual(response.data['msg'], "Booking Status Chain Updated")
class BookingStatusChainDestroyTests(BookingStatusChainTests):
    """
    Tests for POST /api/booking-status-chain-destroy/<id>/.

    Covers test IDs TS01TD00165-TS01TD00169 (created by Hari, 13/12/2018).
    """

    def _destroy(self, chain_id):
        # POST to the destroy endpoint for the given chain id.
        return self.client.post(
            "/api/booking-status-chain-destroy/{}/".format(chain_id),
            content_type='application/json')

    def test_booking_status_chain_destroy_405_wrong_method(self):
        """TS01TD00165: GET is not allowed on the destroy endpoint -> 405."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.get(
            "/api/booking-status-chain-destroy/{}/".format(self.bookingstatuschain_id),
            content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_booking_status_chain_destroy_401_no_header(self):
        """TS01TD00166: missing auth header -> 401."""
        self.client.credentials()
        response = self._destroy(self.bookingstatuschain_id)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")

    def test_booking_status_chain_destroy_401_expired_header(self):
        """TS01TD00167: a token invalidated by logout -> 401 Invalid token."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.delete(self.logout_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self._destroy(self.bookingstatuschain_id)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response.data['detail'], "Invalid token.")

    def test_booking_status_chain_destroy_401_wrong_token(self):
        """TS01TD00167: an unrecognised token -> 401 Invalid token."""
        self.client.credentials(
            HTTP_AUTHORIZATION='Token c8b0f520fa7380fe7090131a4456859b1efc7777')
        response = self._destroy(self.bookingstatuschain_id)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response.data['detail'], "Invalid token.")

    def test_booking_status_chain_destroy_404_wrong_id(self):
        """TS01TD00168: a nonexistent chain id -> 404."""
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self._destroy(1000)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(response.data['detail'], "Not found.")

    def test_booking_status_chain_destroy_200(self):
        """TS01TD00169: destroying a valid chain -> 200 with a success envelope.

        NOTE(review): the original header comment labelled this
        'Status:failure' although it asserts the success path.
        """
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self._destroy(self.bookingstatuschain_id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['status'], "success")
        self.assertEqual(response.data['msg'], "Booking Status Chain Deleted")
class BookingStatusChainListTests(BookingStatusChainTests):
def setUp(self):
super().setUp()
self.bookingstatus = mommy.make(BookingStatuses, status='lr_generated')
self.bookingstatus_id = self.bookingstatus.id
self.primarypreceded = mommy.make(BookingStatuses, status='loaded')
self.primarypreceded_id = self.primarypreceded.id
self.primarysucceeded = mommy.make(BookingStatuses, status='unloaded')
self.primarysucceeded_id = self.primarysucceeded.id
self.secondarypreceded = mommy.make(BookingStatuses, status='loaded')
self.secondarypreceded_id = self.primarypreceded.id
self.secondarysucceeded = mommy.make(BookingStatuses, status='advance_paid')
self.secondarysucceeded_id = self.primarysucceeded.id
"""
Test ID:TS01TD00170
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:failure
Message:wrong method
Status code:405
"""
def test_booking_status_chain_list_405_wrong_method(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschainlist_url, content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
"""
Test ID:TS01TD00171
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:failure
Message:no header
Status code:401
"""
def test_booking_status_chain_list_401_no_header(self):
self.client.credentials()
response = self.client.get(self.bookingstatuschainlist_url, content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")
"""
Test ID:TS01TD00172
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:failure
Message:expired header
Status code:401
"""
def test_booking_status_chain_list_401_expired_header(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.delete(self.logout_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.post(self.bookingstatuschainlist_url, content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01TD00172
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:failure
Message:wrong token
Status code:401
"""
def test_booking_status_chain_list_401_wrong_token(self):
token = 'Token c8b0f520fa7380fe7090131a4456859b1efc7777'
self.client.credentials(HTTP_AUTHORIZATION=token)
response = self.client.get(self.bookingstatuschainlist_url, content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01TD00173
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:success
Message:booking status chain list
Status code:200
"""
def test_booking_status_chain_list_200(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get(self.bookingstatuschainlist_url, content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['status'], "success")
self.assertEqual(response.data['msg'], "Booking Status Chain List")
"""
Test ID:TS01TD00174
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:success
Message:booking status chain list search with valid data
Status code:200
"""
def test_booking_status_chain_list_200_search_with_valid_data(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
employeeroles_id = response.data['data']['id']
response = self.client.get("/api/booking-status-chain-list/?search=lr_generated",
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data'][0]['booking_status']
self.assertEqual(data, 'Lr Generated')
response = self.client.get("/api/booking-status-chain-list/?search=primary",
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data'][0]['level']
self.assertEqual(data, 'primary')
response = self.client.get("/api/booking-status-chain-list/?search={}".format(employeeroles_id),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data'][0]['id']
self.assertEqual(data, employeeroles_id)
self.assertEqual(response.data['status'], "success")
self.assertEqual(response.data['msg'], "Booking Status Chain List")
"""
Test ID:TS01TD00175
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:success
Message:booking status chain list search with invalid data
Status code:200
"""
def test_booking_status_chain_list_200_search_with_invalid_data(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/booking-status-chain-list/?search=hjgsas",
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data']
self.assertEqual(data, [])
response = self.client.get("/api/booking-status-chain-list/?search=svgahm",
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data']
self.assertEqual(data, [])
response = self.client.get("/api/booking-status-chain-list/?search=100000",
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data']
self.assertEqual(data, [])
self.assertEqual(response.data['status'], "success")
self.assertEqual(response.data['msg'], "Booking Status Chain List")
"""
Test ID:TS01TD00176
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:success
Message:booking status chain list valid id
Status code:200
"""
def test_booking_status_chain_list_200_valid_id(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
employeeroles_id = response.data['data']['id']
response = self.client.get("/api/booking-status-chain-list/?id={}".format(employeeroles_id),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data'][0]['id']
self.assertEqual(data, employeeroles_id)
self.assertEqual(response.data['status'], "success")
self.assertEqual(response.data['msg'], "Booking Status Chain List")
"""
Test ID:TS01TD00177
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:success
Message:booking status chain list invalid id
Status code:200
"""
def test_booking_status_chain_list_200_invalid_id(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/booking-status-chain-list/?id=28829",
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data']
self.assertEqual(data, [])
self.assertEqual(response.data['status'], "success")
self.assertEqual(response.data['msg'], "Booking Status Chain List")
"""
Test ID:TS01TD00178
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:success
Message:booking status chain list valid booking status
Status code:200
"""
def test_booking_status_chain_list_200_valid_booking_status(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.get("/api/booking-status-chain-list/?booking_status=lr_generated",
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data'][0]['booking_status']
self.assertEqual(data, 'Lr Generated')
self.assertEqual(response.data['status'], "success")
self.assertEqual(response.data['msg'], "Booking Status Chain List")
"""
Test ID:TS01TD00179
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:success
Message:booking status chain list invalid booking status
Status code:200
"""
def test_booking_status_chain_list_200_invalid_booking_status(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/booking-status-chain-list/?booking_status=udhuedlie",
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data']
self.assertEqual(data, [])
self.assertEqual(response.data['status'], "success")
self.assertEqual(response.data['msg'], "Booking Status Chain List")
"""
Test ID:TS01TD00180
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:success
Message:booking status chain list valid level
Status code:200
"""
def test_booking_status_chain_list_200_valid_level(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.bookingstatuschaincreate_url,
json.dumps({"booking_status_id": self.bookingstatus_id,
"primary_preceded_booking_status_id": self.primarypreceded_id,
"primary_succeeded_booking_status_id": self.primarysucceeded_id,
"secondary_preceded_booking_status_id": self.secondarypreceded_id,
"secondary_succeeded_booking_status_id": self.secondarysucceeded_id,
"level": "primary"}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.get("/api/booking-status-chain-list/?level=primary",
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.data['data'][0]['level']
self.assertEqual(data, 'primary')
self.assertEqual(response.data['status'], "success")
self.assertEqual(response.data['msg'], "Booking Status Chain List")
"""
Test ID:TS01TD00181
Created By:Hari
Created On:13/12/2018
Scenario:booking-status-chain-list/
Status:success
Message:booking status chain list invalid level
Status code:200
"""
def test_booking_status_chain_list_200_invalid_level(self):
    """Filtering by an unknown level returns an empty, successful listing."""
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.get(
        "/api/booking-status-chain-list/?level=hjghdbs",
        content_type='application/json',
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    payload = response.data
    self.assertEqual(payload['data'], [])
    self.assertEqual(payload['status'], "success")
    self.assertEqual(payload['msg'], "Booking Status Chain List")
| 48.679343
| 116
| 0.613188
| 5,556
| 56,322
| 5.965983
| 0.038697
| 0.123541
| 0.06969
| 0.072224
| 0.94998
| 0.946783
| 0.94458
| 0.94458
| 0.937641
| 0.937098
| 0
| 0.035187
| 0.297095
| 56,322
| 1,156
| 117
| 48.721453
| 0.802091
| 0.008114
| 0
| 0.825719
| 0
| 0
| 0.208542
| 0.12745
| 0
| 0
| 0
| 0
| 0.230118
| 1
| 0.084602
| false
| 0.003384
| 0.013536
| 0
| 0.108291
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cf31841c1d37fbffe8fea51e15ca4a81c13f5a10
| 206
|
py
|
Python
|
github_poster/html_parser/__init__.py
|
xiaominglui/GitHubPoster
|
8a8772a65cdcb439c453f5eb8df70c22219edcbf
|
[
"MIT"
] | 963
|
2021-04-28T03:57:35.000Z
|
2022-03-30T16:04:30.000Z
|
github_poster/html_parser/__init__.py
|
xiaominglui/GitHubPoster
|
8a8772a65cdcb439c453f5eb8df70c22219edcbf
|
[
"MIT"
] | 67
|
2022-01-17T06:05:11.000Z
|
2022-03-31T10:01:45.000Z
|
github_poster/html_parser/__init__.py
|
xiaominglui/GitHubPoster
|
8a8772a65cdcb439c453f5eb8df70c22219edcbf
|
[
"MIT"
] | 139
|
2021-04-28T05:02:50.000Z
|
2022-03-19T13:40:48.000Z
|
# Package facade: re-export the per-service parsers so callers can import them
# directly from ``github_poster.html_parser``.
from .github_parser import GitHubParser
from .gitlab_parser import GitLabParser
from .kindle_parser import parse_kindle_text_to_list
# Explicit public API of this package.
__all__ = ("GitHubParser", "GitLabParser", "parse_kindle_text_to_list")
| 34.333333
| 71
| 0.84466
| 27
| 206
| 5.888889
| 0.481481
| 0.226415
| 0.188679
| 0.213836
| 0.264151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087379
| 206
| 5
| 72
| 41.2
| 0.845745
| 0
| 0
| 0
| 0
| 0
| 0.237864
| 0.121359
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d8523b062ebfbef8185457750f022924de983258
| 2,667
|
py
|
Python
|
classes/OLHWDB2_2650.py
|
stq-hydra/CASIA_HW_data_processing
|
23d1737655148c932b86f580e3ed496c5a3065b4
|
[
"Apache-2.0"
] | 1
|
2021-10-21T01:35:56.000Z
|
2021-10-21T01:35:56.000Z
|
classes/OLHWDB2_2650.py
|
stq-hydra/CASIA_HW_data_processing
|
23d1737655148c932b86f580e3ed496c5a3065b4
|
[
"Apache-2.0"
] | null | null | null |
classes/OLHWDB2_2650.py
|
stq-hydra/CASIA_HW_data_processing
|
23d1737655148c932b86f580e3ed496c5a3065b4
|
[
"Apache-2.0"
] | null | null | null |
alphabet = """20年以来,国内企业家包括许多著名在涉嫌违法犯罪被捕入狱的人数不断增此方面报道也屡见诸极端。是哪个抓了就判或者这案子开庭那决总之几乎月都有样新闻落马、刑甚至死执行媒体关注焦点往律问题而更经营和管理上发表各种意乏济学却很少专参与讨论一正常现象存什么最终结局如果走进监由院重要应该调查显示党政干部读书三特:接受网络等阅速度加快;为习工作需时间对级规定认同民坛杂志社频联合“惯”近日公布统计据通过占到6.9%仅次于籍85但比例已超纸41说明程深相较传质及电视可达效吗?树仔菜毒危机海南(天绿香)指金属镉标变检测并未准水西瓜风波用红药染色黑美优良品因谣言打冷宫辟声中平息7蕉致癌巴拿病含类似SAR讹吃省农厅先后遭挫热带起任何吹草动产陷呢令思督缺失助长豪华衙门攀央纪委展改革察财土资源建设审署七前下《办楼堂馆所项目清知》地区修全彻底早今两会出将严刹消短成实高率必整肃吏治惩腐败控本心北京铁路暑运台期预送旅客3万能安排哈尔滨大连烟向临自止共根流情况去峰生游观光休假疗养主集青岛市交负责介绍站街条件限基础施足影响居节黄周春还隐患编制完综划从步系造首东桥保留侧梯换梁无障碍素细化随着饭店蓬勃星记博鳌束评员届培训议获悉复核形式暗访辅考软量范司满宏卫世界第巨场空挑战他硬轻普遍雨河兰州段又污活垃圾十里午导顺洪涌小湖附始岸半幅绵延塑料袋皮饮瓶浩荡漂宽处竟环境告诉没好单位手边输宁厂提供组字岁儿童亡事故分析看般乘车其骑均每伤害原谐汽绝忽我们驾驶教育力气漏洞太莱坞老牌女莎朗斯际张曼玉韩男李俊演山田群甄丹迅震舒淇晓夏…晚聚幕夜毯采取守穿—装阵冯刚当冲领号队伍杰伦锋灌篮剧远势越菲宾LE则依然争奇斗艳身袭透衣搭配白裙粱珞蛋糕亮头镂低胸服性感型逊户文务批启口立研副武召露九代非二元城乡登住努积稳妥推直辖藏席措列态护使沿线得外具吸引别善待校招录即陆续强格禁利做章求切备汇放策降证确信科技钢职士举己府验些历候班须承担权耕亿亩紧围绕坚持促容粮食脆弱坝缓坡淤沙趋真织典冥王义矮衔福尼亚迈克·版究厄迪丝诺威夷凯望镜算颗转轨迹借密约苍穹戏众赏宝座塔火伴遮掩轩辕四景称灶神盛份厚礼顶才升球°置左右航N揭架纱它替退帮宇宙诞顿物詹姆韦尺寸模欧洲构耗倍六径米阳板罗―鲁照温谱仪寿命齐湿解扎龙鹤江库植丰富芦苇沼泽栖繁殖禽蔽洼甸鸟广阔泊磁圈翰杜石块矿封闭惊讶探估免暴毁灭破坏史券涨驱股易盖层矛盾昨融简旦价链裂缩呆账锁反予防险创竞压跨递挂钩奖罚劣费浮盈亏郭践谈袁听维乃储蓄恰搬狼味虽话想骤谓款减币净官瑞瓦商授劳沃初迄宗旨域逐渐突纯双军映莫纽私募Cerbus收购勒戴团拆-纷猜另离梅赛德奔驰销售独且绪楚洋雅裁扩拓渠略额差异渗妆袖羽拥尊牛背棉花遇纺恢难补贴五把握奏既镍钴铂族甘川氧硫昌铜床累叶邻赫彭木片角术谜旬漠诚疆孙鸿烈刘枢久煤蒲县鼎鹏汾康洗苏溧械签协酸剂艺废咐册井盐毫/鱼虾雪霜像!`季觉逮朝勾帖迷室囚状蓝叛々籽帐坟精纲甬兴筑娄辐敬移滋援阶G振写陈序滑隔亲朱弛矢牙琪她回幻姿倚廊栏呼唤墙恩爱羞颤音晕庞陶醉佳夕千曲绡畔瓯尽杯你凄激昂银炬烧浪覆静卧闪焰触怜微叹刃躯剑颓俱倒缭乱雷掌炎扣押余旧胞谊友救呈急幄兄勋鉴请蒋钧载吴廉惟敌派返某挥睦幸蔡廷锴梗否敢矣卅胡铭藻函云贤吾奸藉颇猖獗诱稍疏燎谭秀宪晤匪鸡图倾曾邬姑讳亟沂蒙识绊鞋芳荟萃怎啊偏佬抑脸皱纹婆慢抬扫瞄裤凡丁碎衫掠腰瞬怔材柔晰朴宛株园竹丽番韵继偷眼射叫呵赵臣船击乔铺兵冒寒跳抢锤桩固码刺骨泥泞滩肩扛百斤八奋房艰苦攻靠锹铲镐刨冻只拳窝值晨轴串血泡磨茧臂疼抖筷掉坐艇鼾残酷剥削佃横愤怒丑恶徒篡夺贯扮恬耻捏骗鬼祖父匠屋敞棚雇盲轧碾卖租盘篇耀详阐刊答氏榷汤宣伎俩〈〉辩圣辉师贫穷仍肆虐踏欺盗剽窃语旗轮功佛圆播拼凑惑钱邪奥乌帝诫歪糟蹋妄仰趣芒描摹秃笔夹叙述辰乐玩便粗陌喜颠熟脱跑野譬鸽笼旷萧闲乍泓吟哦咏舞蹈闹撼衡遂惝怳洛赋悦淑兮怡夫曰曹毕支斥慨杀翼孤兆允哲沈舜恳添闯枪侈寡肯撤焚狈逃英泳孝挟庙避冈林峻弥漫屏巫卜僧古充凉姻恐隆戚贺丧吊疾焉躬讼尝尤厉辞诘若霆摧悍沮酒殷勤笑散苟秉勇君痘医痤疮肤肾腺脂泌旺毛囊壁丙杆菌症胃肠紊肪摄①锌萝②B溢谢酶瘦猪肉肺兔鸭鲫蘑菇耳芹菠苋莴笋冬柿豆芽莲藕梨桑椹柚楂苹津润燥肥熏蒸肌油脑芝麻辛辣浓茶咖啡椒蒜韭狗雀忌鲜紫荆森歌唱猕猴氛虎豹猛兽追赶欢跃羚羊斑鹿顽砾器暖烤吓填饱肚茎猎凶棒扒鼠忍饥挨饿禹舟茂汶念寺顾盼折蛇睡壑哗啸泻虬飞腾骇惜瞰貌倘俯雾巍峨尾枕沟莽彩池粼犹鳞甲悟迎寻溯迥颜嵌镶陡玲珑剔晶莹澈缘胆畏搏悲壮雄伟希腊诗刻WFight凛狂舵霍履郎DJonw怀虔蔼脾欣羡鄙锦涛讯益裴姜瑜讲伙除困松遗 
̄钙卵巢雌酗丢跌母孩婚恋愿爸妈碰宋龄按摩秘牧痛健穴痊愈脊柱脏适液末梢腑匹袜锥针犁耧锄耙套鞭酱醋糖碱葱芥摸选货嗓夸赔给溜徕炕饼啪摔擀杖炒粉锅哧啦扑扯拉韧甩拽顷煎柜堆甜蜜舅甥岳丈婿灾荒佩帽徽贵峭枯茫丛蟹爬獾狐择脚挖茅厩吧咴鸣伊腿阿襟概骋尖秋摘娇卸烂埝塄蕾翠捋拔拧绳悬辫蒂罢阴汁醇挣抱撒窑槛枝刀凿晌割伞荫棋摆娃瞅淡炉柴禾嘴涩嚼栈拐弯鼻翅吼朵竖撞惧缰浑哆嗦躺胀鹰啄睛忙嘶抹掀扔塞蹬厌踢墟掘址蚌埠郊涡淮涂村壳灰坑吉试勘Pd买aplfO享c互怪湾汉尚弹投练澎宜截港秦皇庆亦错误循索拜飨谋宦魄湛丘阉TVm吨姓停绩课舆徐萍邀永餐祝妇迟再赢趟印浏览跟刷归昶恒烛翻×钟撑炮勺垛垒傅膀衩汗涔榴瞥枚秒眨喊轰伸铸铅惨峦迭嶂崇岭薄缠滔漳钻甭掂磅扶钎昏灯狠砸叮怯汀巷赤娘挤爽惠嘈舀齿梳拗粘絮扁桶吆喝擦叠簿懒票拾桌碗稀桨糊粥旁缆警炸扬拱宠敦灵魂尉Hky呎荣碑吞聂寝豚哺乳啮厘胖肢腹披棕偶趾蹼爪吻钝裸犬痕拣脖捡倔叔饲抽牲喂屁晃惹混够呛拢侄懵啥呀珠瞪撸叉胳膊汹盯捅虚葫堡征渡湃捣剩筒峪沉艘缴俘虏粪卢桂亭砍镰胜叭磕棱绣咧薯咋俺霞‘’摇拨鼓喀喉咙咱仗斩怕晒搽膏胭让尘渴找奶冰柠檬镇喘椅哼窗莺梦罩悄崖拖帆趁缕艄桅盒画踪忧郁洁哭恨渺智慧溷浊逼窄羁晋耐幼奚绘迫役燕损硕嗡忠娟俄献煌伯窟毅敏挺词擂俗蓦逢俨拘孪媚姝○伐抵潟泰坦慈焕冤枉檀仙颖玛托柏巧妙潘符忆弄辨荃疑牵谅墩磷纤虹暨驻携董誓谁沫释贬琼熊励涸逾沛旱誉~眠荧蛙妮骚龟虫蚂蚁昆膘泉泛滥獭愁距畜撕咬嬉盆圳奕雕睹盟IU胁濒帕戈谷乞埃殿澳礁墨哥蝴蝶榜矫卡庄唇饰御萎扰遥镕弗纳滕爆零稿箱碳骄裔陵埋葬鄂税扈逝耿冢奉祭祀潜惶孰魏邦潮嘎仁氮拟瞒嫖赌奢糜涣畅亨挡舰崩溃纵瓷悠蕴舸唯葆钓贩扳庐寄侍凤孔傍黛阙瀑沓凌窥苔琴芙蓉垓敖甫辚弓箭爷妻咸拦霄裹戍杞陇申卒嫁烦啾琵琶迁郡湓浦铮倡穆衰贾悯沦憔悴徙谪句赠浔枫荻瑟欲弦浸忘宴眉捻霓裳幺幽咽凝暂歇浆迸帛插敛蟆坊妒钿篦弟姨暮啼泪阑唧涯僻宅鹃猿哀岂笛呕哑嘲哳皆泣舫鞍摱沧稼"""
| 2,667
| 2,667
| 0.985002
| 39
| 2,667
| 67.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004503
| 0.00075
| 2,667
| 1
| 2,667
| 2,667
| 0.981238
| 0
| 0
| 0
| 0
| 1
| 0.993253
| 0.993253
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2b4bc72b0e8156e26f94be9625285fdac61eda96
| 2,588
|
py
|
Python
|
tests/integration/test_bookmarks.py
|
KamilRizatdinov/marvelowe-server
|
d93bfaf8f3af5da3301f61aee23c2bf3e176916e
|
[
"MIT"
] | null | null | null |
tests/integration/test_bookmarks.py
|
KamilRizatdinov/marvelowe-server
|
d93bfaf8f3af5da3301f61aee23c2bf3e176916e
|
[
"MIT"
] | 6
|
2022-03-14T14:50:02.000Z
|
2022-03-22T12:50:09.000Z
|
tests/integration/test_bookmarks.py
|
KamilRizatdinov/marwelove-server
|
d93bfaf8f3af5da3301f61aee23c2bf3e176916e
|
[
"MIT"
] | null | null | null |
from fastapi.testclient import TestClient
from src.application import app
from src.bookmarks import get_all_character_bookmarks, get_all_comics_bookmarks
client = TestClient(app)
def test_add_get_bookmark_chapter(setup_marvel_api, db_setup):
    """Bookmark character #1, then verify it shows up in the bookmark listing."""
    credentials = {"username": "birdi7", "password": "123"}
    client.post("/register", data=credentials)
    token = client.post("/token", data=credentials).json()["access_token"]
    auth = {"Authorization": f"Bearer {token}"}
    response = client.post("/bookmark/characters/1", data=credentials, headers=auth)
    assert response.status_code == 200
    assert response.json() == {}
    response = client.get("/bookmark/characters", headers=auth)
    assert response.status_code == 200
    assert response.json()["character_bookmarks"] == get_all_character_bookmarks("birdi7")
def test_get_bookmark_chapter(setup_marvel_api, db_setup):
    """A freshly registered user has an empty character-bookmark list."""
    credentials = {"username": "birdi7", "password": "123"}
    client.post("/register", data=credentials)
    token = client.post("/token", data=credentials).json()["access_token"]
    response = client.get(
        "/bookmark/characters", headers={"Authorization": f"Bearer {token}"}
    )
    assert response.status_code == 200
    assert response.json()["character_bookmarks"] == []
def test_add_get_bookmark_comics_chapter(setup_marvel_api, db_setup):
    """Bookmark comic #1, then verify it appears in the comics bookmark list."""
    credentials = {"username": "birdi7", "password": "123"}
    client.post("/register", data=credentials)
    token = client.post("/token", data=credentials).json()["access_token"]
    auth = {"Authorization": f"Bearer {token}"}
    response = client.post("/bookmark/comics/1", data=credentials, headers=auth)
    assert response.status_code == 200
    assert response.json() == {}
    response = client.get("/bookmark/comics", headers=auth)
    assert response.status_code == 200
    assert response.json()["comic_bookmarks"] == get_all_comics_bookmarks("birdi7")
def test_get_bookmark_comics_chapter(setup_marvel_api, db_setup):
    """A freshly registered user has an empty comics-bookmark list."""
    credentials = {"username": "birdi7", "password": "123"}
    client.post("/register", data=credentials)
    token = client.post("/token", data=credentials).json()["access_token"]
    response = client.get(
        "/bookmark/comics", headers={"Authorization": f"Bearer {token}"}
    )
    assert response.status_code == 200
    assert response.json()["comic_bookmarks"] == []
| 41.079365
| 95
| 0.678903
| 295
| 2,588
| 5.772881
| 0.149153
| 0.098649
| 0.105696
| 0.152672
| 0.911333
| 0.874927
| 0.84909
| 0.84909
| 0.84909
| 0.84909
| 0
| 0.028029
| 0.145286
| 2,588
| 62
| 96
| 41.741935
| 0.741863
| 0
| 0
| 0.652174
| 0
| 0
| 0.275116
| 0.008501
| 0
| 0
| 0
| 0
| 0.26087
| 1
| 0.086957
| false
| 0.217391
| 0.065217
| 0
| 0.152174
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
9922b5dbdbb479f5fe9fec19a8346dd5fa609c93
| 8,526
|
py
|
Python
|
python/oneflow/test/graph/test_graph_optim_ftrl.py
|
Panlichen/oneflow
|
ad93c69c9932e5515aa31fb7f157073708810a3d
|
[
"Apache-2.0"
] | 1
|
2022-03-14T11:17:56.000Z
|
2022-03-14T11:17:56.000Z
|
python/oneflow/test/graph/test_graph_optim_ftrl.py
|
triple-Mu/oneflow
|
395da40885016d0b899f8a1eb87e5311a556a9b8
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test/graph/test_graph_optim_ftrl.py
|
triple-Mu/oneflow
|
395da40885016d0b899f8a1eb87e5311a556a9b8
|
[
"Apache-2.0"
] | 1
|
2021-12-15T02:14:49.000Z
|
2021-12-15T02:14:49.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from collections import OrderedDict
import numpy as np
import copy
from test_util import GenArgList
from optimizer_test_util import clip_grad_norm_np
import oneflow as flow
from oneflow.one_embedding import Ftrl
def compare_with_numpy_ftrl(
    test_case,
    device,
    x_shape,
    learning_rate,
    train_iters,
    weight_decay,
    lr_power,
    initial_accumulator_value,
    lambda1,
    lambda2,
    beta,
):
    """Train one parameter with OneFlow's graph-mode FTRL optimizer and with a
    NumPy re-implementation of the same update rule, then assert that the
    per-iteration parameter snapshots agree (rtol/atol 1e-4).

    Arguments mirror the Ftrl optimizer's hyper-parameters; *x_shape* is the
    shape of the single trainable parameter, *train_iters* the step count.
    """
    # One random array per iteration, used both as the forward-pass mask and
    # (because loss = sum(para0 * mask)) as the gradient of para0.
    random_grad_seq = []
    for _ in range(train_iters):
        random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = np.random.uniform(size=x_shape).astype(np.float32)

    class CustomModule(flow.nn.Module):
        # Minimal module holding a single trainable parameter `para0`.
        def __init__(self):
            super().__init__()
            self.para0 = flow.nn.Parameter(
                flow.Tensor(init_value, device=flow.device(device))
            )

        def forward(self, mask):
            return self.para0 * mask

    simp_module = CustomModule()
    simp_module.to(device)
    simp_module.train()
    ftrl = Ftrl(
        [
            {
                "params": simp_module.parameters(),
                "lr": learning_rate,
                "weight_decay": weight_decay,
                "lr_power": lr_power,
                "initial_accumulator_value": initial_accumulator_value,
                "lambda1": lambda1,
                "lambda2": lambda2,
                "beta": beta,
            }
        ]
    )

    class CustomftrlGraph(flow.nn.Graph):
        # Static-graph wrapper: sum the masked parameter and backprop.
        def __init__(self):
            super().__init__()
            self.m = simp_module
            self.add_optimizer(ftrl)

        def build(self, mask_tensor):
            loss = flow.sum(self.m(mask_tensor))
            loss.backward()
            return loss

    of_res_list = []
    ftrl_graph = CustomftrlGraph()
    for i in range(train_iters):
        mask_tensor = flow.tensor(
            random_grad_seq[i], requires_grad=False, device=flow.device(device)
        )
        ftrl_x = ftrl_graph(mask_tensor)
        # Snapshot the parameter value after each optimizer step.
        of_res_list.append(copy.copy(simp_module.para0.numpy()))
    np_res_list = []

    def train_by_numpy():
        # Reference FTRL implementation operating on plain ndarrays.
        x = init_value
        accum = np.zeros_like(x)
        accum.fill(initial_accumulator_value)
        z_arr = np.zeros_like(x)

        def np_train_one_iter(grad):
            grad = grad + weight_decay * x
            new_accum = accum + grad * grad
            sigma = (
                np.power(new_accum, lr_power) - np.power(accum, lr_power)
            ) / learning_rate
            new_z_val = z_arr + grad - sigma * x
            update_val = (np.sign(new_z_val) * lambda1 - new_z_val) / (
                (beta + np.power(new_accum, lr_power)) / learning_rate + lambda2
            )
            # FTRL sparsity: weights whose |z| stays below lambda1 are zeroed.
            param = np.where(np.abs(new_z_val) < lambda1, 0.0, update_val)
            return (param, new_accum, new_z_val)

        for i in range(1, train_iters + 1):
            (x, accum, z_arr) = np_train_one_iter(random_grad_seq[i - 1])
            np_res_list.append(x)
        return x

    train_by_numpy()
    test_case.assertTrue(np.allclose(of_res_list, np_res_list, rtol=1e-4, atol=1e-4))
def compare_with_numpy_ftrl_clip_grad(
    test_case,
    device,
    x_shape,
    learning_rate,
    train_iters,
    weight_decay,
    lr_power,
    initial_accumulator_value,
    lambda1,
    lambda2,
    beta,
    clip_grad_max_norm,
    clip_grad_norm_type,
):
    """Same check as compare_with_numpy_ftrl, but with gradient-norm clipping
    enabled on the optimizer and mirrored in the NumPy reference via
    clip_grad_norm_np.
    """
    # One random array per iteration, used both as the forward-pass mask and
    # (because loss = sum(para0 * mask)) as the gradient of para0.
    random_grad_seq = []
    for _ in range(train_iters):
        random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = np.random.uniform(size=x_shape).astype(np.float32)

    class CustomModule(flow.nn.Module):
        # Minimal module holding a single trainable parameter `para0`.
        def __init__(self):
            super().__init__()
            self.para0 = flow.nn.Parameter(
                flow.Tensor(init_value, device=flow.device(device))
            )

        def forward(self, mask):
            return self.para0 * mask

    simp_module = CustomModule()
    simp_module.to(device)
    simp_module.train()
    ftrl = Ftrl(
        [
            {
                "params": simp_module.parameters(),
                "lr": learning_rate,
                "weight_decay": weight_decay,
                "lr_power": lr_power,
                "initial_accumulator_value": initial_accumulator_value,
                "lambda1": lambda1,
                "lambda2": lambda2,
                "beta": beta,
                "clip_grad_max_norm": clip_grad_max_norm,
                "clip_grad_norm_type": clip_grad_norm_type,
            }
        ]
    )

    class CustomftrlGraph(flow.nn.Graph):
        # Static-graph wrapper: sum the masked parameter and backprop.
        def __init__(self):
            super().__init__()
            self.m = simp_module
            self.add_optimizer(ftrl)

        def build(self, mask_tensor):
            loss = flow.sum(self.m(mask_tensor))
            loss.backward()
            return loss

    of_res_list = []
    ftrl_graph = CustomftrlGraph()
    for i in range(train_iters):
        mask_tensor = flow.tensor(
            random_grad_seq[i], requires_grad=False, device=flow.device(device)
        )
        ftrl_x = ftrl_graph(mask_tensor)
        # Snapshot the parameter value after each optimizer step.
        of_res_list.append(copy.copy(simp_module.para0.numpy()))
    np_res_list = []

    def train_by_numpy():
        # Reference FTRL implementation operating on plain ndarrays.
        x = init_value
        accum = np.zeros_like(x)
        accum.fill(initial_accumulator_value)
        z_arr = np.zeros_like(x)

        def np_train_one_iter(grad):
            # Clip the raw gradient first, exactly as the optimizer does.
            norm, grad = clip_grad_norm_np(
                grad, clip_grad_max_norm, clip_grad_norm_type
            )
            grad = grad + weight_decay * x
            new_accum = accum + grad * grad
            sigma = (
                np.power(new_accum, lr_power) - np.power(accum, lr_power)
            ) / learning_rate
            new_z_val = z_arr + grad - sigma * x
            update_val = (np.sign(new_z_val) * lambda1 - new_z_val) / (
                (beta + np.power(new_accum, lr_power)) / learning_rate + lambda2
            )
            # FTRL sparsity: weights whose |z| stays below lambda1 are zeroed.
            param = np.where(np.abs(new_z_val) < lambda1, 0.0, update_val)
            return (param, new_accum, new_z_val)

        for i in range(1, train_iters + 1):
            (x, accum, z_arr) = np_train_one_iter(random_grad_seq[i - 1])
            np_res_list.append(x)
        return x

    train_by_numpy()
    test_case.assertTrue(np.allclose(of_res_list, np_res_list, rtol=1e-4, atol=1e-4))
@flow.unittest.skip_unless_1n1d()
class Testftrl(flow.unittest.TestCase):
    """Sweeps FTRL hyper-parameter combinations against the NumPy reference."""

    def test_ftrl(test_case):
        # CPU-only CI runs override the default CUDA device.
        device = ["cpu"] if os.getenv("ONEFLOW_TEST_CPU_ONLY") else ["cuda"]
        arg_dict = OrderedDict(
            [
                ("device", device),
                ("x_shape", [(10,)]),
                ("learning_rate", [1, 1e-3]),
                ("train_iters", [10]),
                ("weight_decay", [0.9, 0.000]),
                ("lr_power", [-0.5, 0.5]),
                ("initial_accumulator_value", [0.1, 0.05]),
                ("lambda1", [0.01]),
                ("lambda2", [0.0, 0.01]),
                ("beta", [1.0]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_with_numpy_ftrl(test_case, *arg)

    def test_ftrl_clip_grad(test_case):
        # Same grid as above, plus gradient-clipping settings.
        device = ["cpu"] if os.getenv("ONEFLOW_TEST_CPU_ONLY") else ["cuda"]
        arg_dict = OrderedDict(
            [
                ("device", device),
                ("x_shape", [(10,)]),
                ("learning_rate", [1, 1e-3]),
                ("train_iters", [10]),
                ("weight_decay", [0.9, 0.000]),
                ("lr_power", [-0.5, 0.5]),
                ("initial_accumulator_value", [0.1, 0.05]),
                ("lambda1", [0.01]),
                ("lambda2", [0.0, 0.01]),
                ("beta", [1.0]),
                ("clip_grad_max_norm", [1.0]),
                ("clip_grad_norm_type", [2.0]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_with_numpy_ftrl_clip_grad(test_case, *arg)
if __name__ == "__main__":
unittest.main()
| 30.341637
| 85
| 0.593831
| 1,110
| 8,526
| 4.240541
| 0.167568
| 0.04164
| 0.048863
| 0.015934
| 0.825154
| 0.823879
| 0.807946
| 0.804759
| 0.775653
| 0.775653
| 0
| 0.022015
| 0.296739
| 8,526
| 280
| 86
| 30.45
| 0.763009
| 0.068145
| 0
| 0.8
| 0
| 0
| 0.061988
| 0.017891
| 0
| 0
| 0
| 0
| 0.009091
| 1
| 0.072727
| false
| 0
| 0.040909
| 0.009091
| 0.172727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
999d9d35b7cd4ba9d51df4a9db3cde18b30fe084
| 8,584
|
py
|
Python
|
patients/tests.py
|
oreon/zenemr
|
d43fb3682979dbdfa0f16e281b043b7fc291b598
|
[
"MIT"
] | null | null | null |
patients/tests.py
|
oreon/zenemr
|
d43fb3682979dbdfa0f16e281b043b7fc291b598
|
[
"MIT"
] | null | null | null |
patients/tests.py
|
oreon/zenemr
|
d43fb3682979dbdfa0f16e281b043b7fc291b598
|
[
"MIT"
] | null | null | null |
#
#
# from django.contrib import admin
#
# from . import models
# from .commons import CustomModelAdminMixin
#
#
# class DrugTests(BaseTest):
#
# url = 'drug'
#
# fixtures = [' Drugs.json','patients.json']
#
# def test_createDrugByNotAllowed Drug(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# ''' access should be denied'''
# self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
#
# def test_createDrug(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# self.assertEqual(response.status_code, status.HTTP_201_CREATED)
#
# class CategoryTests(BaseTest):
#
# url = 'category'
#
# fixtures = [' Categorys.json','patients.json']
#
# def test_createCategoryByNotAllowed Category(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# ''' access should be denied'''
# self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
#
# def test_createCategory(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# self.assertEqual(response.status_code, status.HTTP_201_CREATED)
#
# class PatientTests(BaseTest):
#
# url = 'patient'
#
# fixtures = [' Patients.json','patients.json']
#
# def test_createPatientByNotAllowed Patient(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# ''' access should be denied'''
# self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
#
# def test_createPatient(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# self.assertEqual(response.status_code, status.HTTP_201_CREATED)
#
# class PrescriptionTests(BaseTest):
#
# url = 'prescription'
#
# fixtures = [' Prescriptions.json','patients.json']
#
# def test_createPrescriptionByNotAllowed Prescription(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# ''' access should be denied'''
# self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
#
# def test_createPrescription(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# self.assertEqual(response.status_code, status.HTTP_201_CREATED)
#
# class PrescriptionItemTests(BaseTest):
#
# url = 'prescriptionItem'
#
# fixtures = [' PrescriptionItems.json','patients.json']
#
# def test_createPrescriptionItemByNotAllowed PrescriptionItem(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# ''' access should be denied'''
# self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
#
# def test_createPrescriptionItem(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# self.assertEqual(response.status_code, status.HTTP_201_CREATED)
#
# class EmployeeTests(BaseTest):
#
# url = 'employee'
#
# fixtures = [' Employees.json','patients.json']
#
# def test_createEmployeeByNotAllowed Employee(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# ''' access should be denied'''
# self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
#
# def test_createEmployee(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# self.assertEqual(response.status_code, status.HTTP_201_CREATED)
#
# class VaccinationTests(BaseTest):
#
# url = 'vaccination'
#
# fixtures = [' Vaccinations.json','patients.json']
#
# def test_createVaccinationByNotAllowed Vaccination(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# ''' access should be denied'''
# self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
#
# def test_createVaccination(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# self.assertEqual(response.status_code, status.HTTP_201_CREATED)
#
# class EncounterTests(BaseTest):
#
# url = 'encounter'
#
# fixtures = [' Encounters.json','patients.json']
#
# def test_createEncounterByNotAllowed Encounter(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# ''' access should be denied'''
# self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
#
# def test_createEncounter(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# self.assertEqual(response.status_code, status.HTTP_201_CREATED)
#
# class VaccineTests(BaseTest):
#
# url = 'vaccine'
#
# fixtures = [' Vaccines.json','patients.json']
#
# def test_createVaccineByNotAllowed Vaccine(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# ''' access should be denied'''
# self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
#
# def test_createVaccine(self):
# self.login('alicia-rn')
# response = self.read_one_record(suffix='writable')
# data = response.data
# data['id'] = None
# response = self.client.post(self.url,data)
# print(response.data)
# self.assertEqual(response.status_code, status.HTTP_201_CREATED)
#
#
#
| 36.52766
| 76
| 0.586906
| 867
| 8,584
| 5.686275
| 0.101499
| 0.087627
| 0.047465
| 0.069371
| 0.793103
| 0.751116
| 0.751116
| 0.751116
| 0.751116
| 0.751116
| 0
| 0.008744
| 0.280522
| 8,584
| 235
| 77
| 36.52766
| 0.789508
| 0.944315
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
99ab03a35efd00985536b538386fb96bf787210b
| 1,880
|
py
|
Python
|
extensions/.stubs/clrclasses/System/Runtime/Remoting/Messaging/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | 1
|
2020-03-25T03:27:24.000Z
|
2020-03-25T03:27:24.000Z
|
extensions/.stubs/clrclasses/System/Runtime/Remoting/Messaging/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | null | null | null |
extensions/.stubs/clrclasses/System/Runtime/Remoting/Messaging/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | null | null | null |
from __clrclasses__.System.Runtime.Remoting.Messaging import AsyncResult
from __clrclasses__.System.Runtime.Remoting.Messaging import CallContext
from __clrclasses__.System.Runtime.Remoting.Messaging import ConstructionCall
from __clrclasses__.System.Runtime.Remoting.Messaging import ConstructionResponse
from __clrclasses__.System.Runtime.Remoting.Messaging import Header
from __clrclasses__.System.Runtime.Remoting.Messaging import HeaderHandler
from __clrclasses__.System.Runtime.Remoting.Messaging import ILogicalThreadAffinative
from __clrclasses__.System.Runtime.Remoting.Messaging import IMessage
from __clrclasses__.System.Runtime.Remoting.Messaging import IMessageCtrl
from __clrclasses__.System.Runtime.Remoting.Messaging import IMessageSink
from __clrclasses__.System.Runtime.Remoting.Messaging import IMethodCallMessage
from __clrclasses__.System.Runtime.Remoting.Messaging import IMethodMessage
from __clrclasses__.System.Runtime.Remoting.Messaging import IMethodReturnMessage
from __clrclasses__.System.Runtime.Remoting.Messaging import InternalMessageWrapper
from __clrclasses__.System.Runtime.Remoting.Messaging import IRemotingFormatter
from __clrclasses__.System.Runtime.Remoting.Messaging import LogicalCallContext
from __clrclasses__.System.Runtime.Remoting.Messaging import MessageSurrogateFilter
from __clrclasses__.System.Runtime.Remoting.Messaging import MethodCall
from __clrclasses__.System.Runtime.Remoting.Messaging import MethodCallMessageWrapper
from __clrclasses__.System.Runtime.Remoting.Messaging import MethodResponse
from __clrclasses__.System.Runtime.Remoting.Messaging import MethodReturnMessageWrapper
from __clrclasses__.System.Runtime.Remoting.Messaging import OneWayAttribute
from __clrclasses__.System.Runtime.Remoting.Messaging import RemotingSurrogateSelector
from __clrclasses__.System.Runtime.Remoting.Messaging import ReturnMessage
| 75.2
| 87
| 0.897872
| 192
| 1,880
| 8.291667
| 0.161458
| 0.211055
| 0.301508
| 0.407035
| 0.753769
| 0.753769
| 0.753769
| 0
| 0
| 0
| 0
| 0
| 0.051064
| 1,880
| 24
| 88
| 78.333333
| 0.892377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
41e028a576c41f15f923f3bec61c333e17f099dc
| 27,406
|
py
|
Python
|
stat_engine.py
|
thisuraseniya/Activity-Monitor
|
d7f6974cda4f3050aae5719388033c75678eb9da
|
[
"MIT"
] | null | null | null |
stat_engine.py
|
thisuraseniya/Activity-Monitor
|
d7f6974cda4f3050aae5719388033c75678eb9da
|
[
"MIT"
] | 3
|
2019-10-01T05:44:54.000Z
|
2022-03-12T01:00:24.000Z
|
stat_engine.py
|
thisuraseniya/Activity-Monitor
|
d7f6974cda4f3050aae5719388033c75678eb9da
|
[
"MIT"
] | null | null | null |
import sqlite3
import datetime
browsers = {}
browsers_real = {}
def give_usage(db_path, date):
    """Build per-app usage totals and an activity timeline for one day.

    Reads the ``tracker`` table of the SQLite database at *db_path*, keeping
    only records whose ``d`` column equals *date* (``YYYY-MM-DD``), ordered
    by their ``t`` timestamp.

    Returns a 4-tuple ``(apps, timeline, timeline_colors, apps_colors)``:
      apps            -- {app name: total active minutes}
      timeline        -- ordered [label, minutes] segments, including
                         synthetic "Inactive" and "No Data" segments
      timeline_colors -- hex color per timeline segment
      apps_colors     -- {label: hex color}

    Side effect: rebuilds the module-level ``browsers`` / ``browsers_real``
    dicts with the window titles seen per known browser.
    """
    global browsers, browsers_real
    browsers = {'Opera': [], 'Chrome': [], 'Microsoft Edge': [], 'Iexplore': [], 'Firefox': [], 'Safari': [],
                'Edge (Chromium)': []}
    browsers_real = {'Opera': [], 'Chrome': [], 'Microsoft Edge': [], 'Iexplore': [], 'Firefox': [], 'Safari': [],
                     'Edge (Chromium)': []}
    # Palette rotated through as new applications are discovered.
    colors = [
        '#1f77b4', '#2ca02c', '#d62728', '#ff7f0e', '#9467bd', '#e377c2', '#bcbd22', '#17becf', '#8c564b', '#9edae5',
        '#aec7e8', '#ffbb78', '#98df8a', '#ff9896', '#c5b0d5', '#c49c94', '#f7b6d2', '#c7c7c7', '#dbdb8d', '#7f7f7f'
    ]
    fmt = '%H:%M:%S'
    # Gaps longer than this between two samples are counted as "Inactive".
    idle_time = datetime.timedelta(seconds=185)
    today = datetime.date.today().strftime("%Y-%m-%d")
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    # Parameterized queries: *date* comes from the caller and must not be
    # concatenated into the SQL text (SQL injection / quoting bugs).
    c.execute('SELECT * FROM tracker WHERE d=? ORDER BY t', (date,))
    data = c.fetchall()
    c.execute('SELECT DISTINCT app FROM tracker WHERE d=?', (date,))
    applications = c.fetchall()
    conn.close()  # results are materialized; release the db handle
    # Assign each application a stable color and a zeroed usage counter.
    apps = {}
    apps_colors = {'Inactive': '#eeeeee', 'No Data': '#ffffff'}
    timeline = []
    timeline_colors = []
    for row in applications:
        app_name = row[0].split(' - ')[0]
        apps[app_name] = 0
        if app_name not in apps_colors:
            apps_colors[app_name] = colors[0]
            colors.append(colors.pop(0))  # rotate the palette
    # Leading segment: from midnight to the first sample of the day.
    try:
        time_large = data[0][4].split('.')[0]  # first record's time, fraction stripped
        time_small = "00:00:00"
        diff = datetime.datetime.strptime(time_large, fmt) - datetime.datetime.strptime(time_small, fmt)
        dt = datetime.datetime.strptime(str(diff), '%H:%M:%S')
        delta = datetime.timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second)
        midnight_minutes = delta.total_seconds() / 60
        if midnight_minutes > 3.1:
            timeline.append(["Inactive", midnight_minutes])
        else:
            timeline.append([data[0][1], midnight_minutes])
    except (ValueError, IndexError):
        # No records for the day, or an unparsable timestamp: no leading segment.
        pass
    x = 1
    while x < len(data):
        try:
            time_large = data[x][4].split('.')[0]      # current sample's time
            time_small = data[x - 1][4].split('.')[0]  # previous sample's time
            diff = datetime.datetime.strptime(time_large, fmt) - datetime.datetime.strptime(time_small, fmt)
            # Re-parsing str(diff) as time-only rejects negative / >1-day spans
            # (system clock changes) with a ValueError.
            dt = datetime.datetime.strptime(str(diff), '%H:%M:%S')
            delta = datetime.timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second)
            usage_minutes = delta.total_seconds() / 60
        except ValueError:
            print('ValueError - Error calculating - Probably system time was changed')
            print("PREVIOUS - " + str(data[x - 1]))
            print("THIS - " + str(data[x]))
            # NOTE(review): diff/usage_minutes keep their previous values here
            # (and are unbound on the very first iteration) -- preserved as-is.
        if diff > idle_time:
            # Gap too long: extend or open an "Inactive" segment.
            try:
                if timeline[-1][0] == "Inactive":
                    timeline[-1][1] += usage_minutes
                else:
                    timeline.append(["Inactive", usage_minutes])
            except IndexError:
                timeline.append(["Inactive", usage_minutes])
        else:
            try:
                # Credit the elapsed time to the app of the *previous* sample.
                # NOTE(review): apps keys come from the DISTINCT query split on
                # ' - ', but this lookup uses the raw app string -- assumes app
                # values contain no ' - ', otherwise KeyError (uncaught).
                apps[data[x - 1][1]] += usage_minutes
                if timeline[-1][0] == data[x - 1][1]:
                    timeline[-1][1] += usage_minutes
                    # If the app is a known browser, remember its window title.
                    try:
                        if data[x - 1][1] in browsers:
                            mid = data[x - 1][6].split('-')
                            if len(mid) > 1:
                                window = '-'.join(mid[:-1])
                            else:
                                window = mid[0]
                            if window not in browsers[data[x - 1][1]]:
                                browsers[data[x - 1][1]].append(window)
                                browsers_real[data[x - 1][1]].append(data[x - 1][4] + ' | ' + window)
                    except Exception as e:
                        print(e)
                else:
                    timeline.append([data[x - 1][1], usage_minutes])
                    # Same browser-window bookkeeping for a new segment.
                    try:
                        if data[x - 1][1] in browsers:
                            mid = data[x - 1][6].split('-')
                            if len(mid) > 1:
                                window = '-'.join(mid[:-1])
                            else:
                                window = mid[0]
                            if window not in browsers[data[x - 1][1]]:
                                browsers[data[x - 1][1]].append(window)
                                browsers_real[data[x - 1][1]].append(data[x - 1][4] + ' | ' + window)
                    except Exception as e:
                        print(e)
            except ValueError:
                print('ValueError - Error calculating - Probably system time was changed')
            except IndexError:
                timeline.append([data[x - 1][1], usage_minutes])
        x += 1
    # Trailing segment: from the last sample to midnight. Labeled "No Data"
    # when the requested day is today (still in progress), else "Inactive".
    try:
        time_large = "23:59:59"
        time_small = data[-1][4].split('.')[0]
        diff = datetime.datetime.strptime(time_large, fmt) - datetime.datetime.strptime(time_small, fmt)
        dt = datetime.datetime.strptime(str(diff), '%H:%M:%S')
        delta = datetime.timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second)
        no_data_time = delta.total_seconds() / 60
        if no_data_time > 3:
            if today == data[-1][3]:
                timeline.append(["No Data", no_data_time])
            else:
                timeline.append(["Inactive", no_data_time])
    except (ValueError, IndexError):
        print('ValueError - Error calculating - Probably system time was changed')
    for record in timeline:
        timeline_colors.append(apps_colors[record[0]])
    return apps, timeline, timeline_colors, apps_colors
def give_downloads(db_path, date):
    """Return all download records logged for *date*.

    Args:
        db_path: path to the sqlite database file.
        date: date string matched against the ``d`` column.

    Returns:
        list of raw rows from the ``downloads`` table.
    """
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        # Parameterized query — the old string concatenation was injectable.
        c.execute('SELECT * FROM downloads WHERE d=?', (date,))
        data = c.fetchall()
    finally:
        # The original leaked the connection; always release it.
        conn.close()
    return data
# def give_network_usage():
# stat = psutil.net_io_counters()
# downloads = round(stat.bytes_recv/1024/1024, 2)
# uploads = round(stat.bytes_sent/1024/1024, 2)
# return downloads, uploads
def app_stats(db_path, date):
    """Summarize application usage for *date*.

    Returns:
        tuple ``(final_time, first_app, second_app, third_app,
        final_browsers)`` where ``final_time`` is the formatted total
        usage, each ``*_app`` is a ``[name, formatted_time]`` pair for
        one of the three most-used applications ("" when fewer than
        three apps were used, matching the old empty-data fallback), and
        ``final_browsers`` maps browser names to their recorded window
        lists.
    """
    apps, timeline, timeline_colors, apps_colors = give_usage(db_path, date)
    global browsers_real
    total_usage = sum(apps.values())
    # Rank apps without mutating the dict.  The old code did
    # ``apps_copy = apps`` (an alias, not a copy) and poisoned entries
    # with -1, which produced bogus duplicate results whenever fewer
    # than three apps existed.
    ranked = sorted(apps, key=apps.get, reverse=True)[:3]
    top = [[name, convert_time(apps[name], 1)] for name in ranked]
    # Pad with "" like the original ValueError fallback for empty data.
    top += [""] * (3 - len(top))
    first_app, second_app, third_app = top
    # Keep only browsers that actually recorded any windows.
    final_browsers = {name: windows for name, windows in browsers_real.items() if windows}
    final_time = convert_time(total_usage)
    return final_time, first_app, second_app, third_app, final_browsers
def give_word_count(db_path, date):
    """Return the total number of words typed on *date*.

    Sums column 6 (the per-row word count) of every ``key_logger`` row
    recorded for the given date.
    """
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        # Parameterized query — the old string concatenation was injectable.
        c.execute('SELECT * FROM key_logger WHERE d=?', (date,))
        data = c.fetchall()
    finally:
        # The original leaked the connection; always release it.
        conn.close()
    return sum(int(row[6]) for row in data)
def convert_time(minutes, t=0):
    """Format a minute count as a human-readable duration string.

    Hour and minute parts each end with a newline; the seconds part does
    not.  With ``t == 0`` seconds are always appended; with any other
    ``t`` they appear only when both hours and minutes are zero.
    """
    hrs, mins, secs_raw = str(datetime.timedelta(minutes=minutes)).split(':')
    secs = str(round(float(secs_raw), 0)).split('.')[0]

    def unit(value, word, singular_forms):
        # Singular wording when the value matches one of the accepted forms.
        suffix = '' if value in singular_forms else 's'
        return value + ' ' + word + suffix

    pieces = []
    if hrs not in ('00', '0'):
        pieces.append(unit(hrs, 'hour', ('1',)) + '\n')
    if mins != '00':
        pieces.append(unit(mins, 'minute', ('1', '01')) + '\n')
    if t == 0 or (mins == '00' and hrs in ('0', '00')):
        pieces.append(unit(secs, 'second', ('1', '01')))
    return ''.join(pieces)
def give_screenshots(db_path, date):
    """Group screenshot rows for *date* by application.

    Application names are sanitized (dots and spaces replaced with
    underscores) so they can serve as identifiers.  Rows whose ``pic``
    column equals "no_ss" carry no screenshot and are excluded by the
    query.

    Returns:
        dict mapping sanitized app name -> list of raw tracker rows.
    """
    screenshots = {}
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        # Parameterized date — the old string concatenation was injectable.
        c.execute('SELECT * FROM tracker WHERE d=? AND pic != "no_ss"', (date,))
        data = c.fetchall()
    finally:
        # The original leaked the connection; always release it.
        conn.close()
    for row in data:
        app = row[1].replace(".", "_").replace(" ", "_")
        screenshots.setdefault(app, []).append(row)
    return screenshots
def give_url_data(db_path, date):
    """Return total time per URL for *date*.

    Sums the ``t`` column over all ``url_data`` rows of the given date.
    The ORDER BY is kept so the dict's insertion order (first-seen URL)
    matches the original behavior.

    Returns:
        dict mapping url -> accumulated time value.
    """
    url = {}
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        # Parameterized date — the old string concatenation was injectable.
        c.execute('SELECT url, t, start_t, end_t FROM url_data WHERE d=? ORDER BY t DESC', (date,))
        data = c.fetchall()
    finally:
        # The original leaked the connection; always release it.
        conn.close()
    for link, spent, _start, _end in data:
        if link in url:
            url[link] += spent
        else:
            url[link] = spent
    return url
def give_eye_tracker_data(db_path, date):
    """Build an attention timeline from the ``eye_tracker`` table for *date*.

    Returns:
        (timeline, timeline_colors): ``timeline`` is a list of
        ``[label, minutes]`` segments with labels "Active", "Inactive",
        "No Data" or "Camera not found / in use"; ``timeline_colors``
        holds the matching hex color per segment.
    """
    fmt = '%H:%M:%S'
    today = datetime.date.today().strftime("%Y-%m-%d")
    # Gaps longer than this between consecutive samples count as idle time.
    idle_time = datetime.timedelta(seconds=188)
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    # NOTE(review): date is concatenated into the SQL string — injectable;
    # should be a parameterized query.  Connection is never closed.
    c.execute('SELECT * FROM eye_tracker WHERE d="' + date + '" ORDER BY t')
    data = c.fetchall()
    timeline = []
    timeline_colors = []
    try:
        # Leading segment: from midnight to the first sample of the day.
        time_large = data[0][2].split('.')[0]  # get time of 1st record
        time_small = "00:00:00"  # midnight
        diff = datetime.datetime.strptime(time_large, fmt) - datetime.datetime.strptime(time_small, fmt)  # big - small
        dt = datetime.datetime.strptime(str(diff), '%H:%M:%S')  # convert to H-M-S
        delta = datetime.timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second)  # covert to time delta
        midnight_minutes = delta.total_seconds() / 60  # convert to seconds then to minutes
        if midnight_minutes > 3.1:
            timeline.append(["Inactive", midnight_minutes])
        else:
            timeline.append([data[0][1], midnight_minutes])
    except (ValueError, IndexError):
        # No rows for the date, or an unparsable timestamp: no leading segment.
        pass
    x = 1
    while x < len(data):
        try:
            # Minutes elapsed between the previous sample and this one.
            time_large = data[x][2].split('.')[0]  # get time of 2nd record
            time_small = data[x - 1][2].split('.')[0]  # get time of 1st record
            diff = datetime.datetime.strptime(time_large, fmt) - datetime.datetime.strptime(time_small,
                                                                                            fmt)  # big - small
            dt = datetime.datetime.strptime(str(diff), '%H:%M:%S')  # convert to H-M-S
            delta = datetime.timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second)  # covert to time delta
            usage_minutes = delta.total_seconds() / 60  # convert to seconds then to minutes
        except ValueError:
            # NOTE(review): ``diff``/``usage_minutes`` keep their
            # previous-iteration values here (and are unbound if the very
            # first iteration fails) — the code below still uses them.
            print('ValueError - Error calculating - Probably system time was changed')
            print("PREVIOUS - " + str(data[x - 1]))
            print("THIS - " + str(data[x]))
            pass
        if diff > idle_time:
            # Gap too large: extend or open an "Inactive" segment.
            try:
                if timeline[-1][0] == "Inactive":
                    timeline[-1][1] += usage_minutes
                else:
                    timeline.append(["Inactive", usage_minutes])
            except IndexError:
                # Timeline still empty.
                timeline.append(["Inactive", usage_minutes])
        else:
            try:
                # Column 3 flags camera failure; columns 4 and 5 appear to be
                # detection flags (presumably face/eyes detected — TODO
                # confirm against the eye_tracker schema).
                if data[x - 1][3] == 0:
                    if data[x - 1][5] == 1:
                        if timeline[-1][0] == "Active":
                            timeline[-1][1] += usage_minutes
                        else:
                            timeline.append(["Active", usage_minutes])
                    elif data[x - 1][4] == 1:
                        if timeline[-1][0] == "Active":
                            timeline[-1][1] += usage_minutes
                        else:
                            timeline.append(["Active", usage_minutes])
                    else:
                        timeline.append(["Inactive", usage_minutes])
                elif data[x - 1][3] == 1:
                    if timeline[-1][0] == "Camera not found / in use":
                        timeline[-1][1] += usage_minutes
                    else:
                        timeline.append(["Camera not found / in use", usage_minutes])
            except ValueError:
                print('ValueError - Error calculating - Probably system time was changed')
                pass
            except IndexError:
                # Timeline still empty: open the first segment.
                timeline.append([data[x - 1][1], usage_minutes])
        x += 1
    try:
        # Trailing segment: from the last sample to end of day.
        time_large = "23:59:59"  # get time of 2nd record
        time_small = data[-1][2].split('.')[0]  # get time of last record
        diff = datetime.datetime.strptime(time_large, fmt) - datetime.datetime.strptime(time_small, fmt)  # big - small
        dt = datetime.datetime.strptime(str(diff), '%H:%M:%S')  # convert to H-M-S
        delta = datetime.timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second)  # covert to time delta
        no_data_time = delta.total_seconds() / 60  # convert to seconds then to minutes
        if no_data_time > 3:
            if today == data[-1][1]:
                # Still today: more data may yet arrive.
                timeline.append(["No Data", no_data_time])
            else:
                timeline.append(["Inactive", no_data_time])
    except (ValueError, IndexError):
        print('ValueError - Error calculating - Probably system time was changed')
        pass
    # Fixed color per segment label.
    for record in timeline:
        if record[0] == "Active":
            timeline_colors.append("#2ca02c")
        elif record[0] == "Inactive":
            timeline_colors.append("#eeeeee")
        elif record[0] == "No Data":
            timeline_colors.append("#ffffff")
        elif record[0] == "Camera not found / in use":
            timeline_colors.append("#d62728")
    return timeline, timeline_colors
def get_hourly_data(db_path, date, slot):
    """Build the per-app usage summary and timeline for one hour of *date*.

    ``slot`` is a 1-based hour index (1 -> 00:00-01:00 ... 24 ->
    23:00-24:00); any other value returns {}.  Also resets and refills the
    module-level ``browsers``/``browsers_real`` dicts with the browser
    windows observed during the slot.

    Returns:
        (apps, timeline, timeline_colors, apps_colors): per-app minute
        totals, ``[label, minutes]`` timeline segments, their colors, and
        the label->color mapping.
    """
    # Hour boundaries for each 1-based slot index.
    slots = {
        1: ("00:00:00", "01:00:00"),
        2: ("01:00:00", "02:00:00"),
        3: ("02:00:00", "03:00:00"),
        4: ("03:00:00", "04:00:00"),
        5: ("04:00:00", "05:00:00"),
        6: ("05:00:00", "06:00:00"),
        7: ("06:00:00", "07:00:00"),
        8: ("07:00:00", "08:00:00"),
        9: ("08:00:00", "09:00:00"),
        10: ("09:00:00", "10:00:00"),
        11: ("10:00:00", "11:00:00"),
        12: ("11:00:00", "12:00:00"),
        13: ("12:00:00", "13:00:00"),
        14: ("13:00:00", "14:00:00"),
        15: ("14:00:00", "15:00:00"),
        16: ("15:00:00", "16:00:00"),
        17: ("16:00:00", "17:00:00"),
        18: ("17:00:00", "18:00:00"),
        19: ("18:00:00", "19:00:00"),
        20: ("19:00:00", "20:00:00"),
        21: ("20:00:00", "21:00:00"),
        22: ("21:00:00", "22:00:00"),
        23: ("22:00:00", "23:00:00"),
        24: ("23:00:00", "24:00:00")
    }
    try:
        low_time = slots[slot][0]
        high_time = slots[slot][1]
    except KeyError:
        # Unknown slot number.
        return {}
    global browsers, browsers_real
    # Reset the per-browser window accumulators for this slot.
    browsers = {'Opera': [], 'Chrome': [], 'Microsoft Edge': [], 'Iexplore': [], 'Firefox': [], 'Safari': [],
                'Edge (Chromium)': []}
    browsers_real = {'Opera': [], 'Chrome': [], 'Microsoft Edge': [], 'Iexplore': [], 'Firefox': [], 'Safari': [],
                     'Edge (Chromium)': []}
    # Rotating palette used to assign a stable color per application.
    colors = [
        '#1f77b4', '#2ca02c', '#d62728', '#ff7f0e', '#9467bd', '#e377c2', '#bcbd22', '#17becf', '#8c564b', '#9edae5',
        '#aec7e8', '#ffbb78', '#98df8a', '#ff9896', '#c5b0d5', '#c49c94', '#f7b6d2', '#c7c7c7', '#dbdb8d', '#7f7f7f'
    ]
    fmt = '%H:%M:%S'
    # Gaps longer than this between samples count as idle time.
    idle_time = datetime.timedelta(seconds=185)
    today = datetime.date.today().strftime("%Y-%m-%d")
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    # NOTE(review): values are concatenated into the SQL string — injectable;
    # should be parameterized.  Connection is never closed.
    c.execute(
        'SELECT * FROM tracker WHERE t < time("' + high_time + '") AND t > time("' + low_time + '") AND d = "' + date + '" ')
    data = c.fetchall()
    c.execute('SELECT DISTINCT app FROM tracker WHERE d="' + date + '"')
    applications = c.fetchall()
    # create dictionary
    apps = {}
    apps_colors = {}
    timeline = []
    timeline_colors = []
    for row in applications:
        # NOTE(review): apps is keyed by split(' - ')[0] here but incremented
        # with the raw app value below — a KeyError is possible if they
        # differ and would NOT be caught (only ValueError/IndexError are).
        app_name = row[0].split(' - ')[0]
        apps[app_name] = 0
        # Fixed colors for the two synthetic timeline labels.
        apps_colors['Inactive'] = '#eeeeee'
        apps_colors['No Data'] = '#ffffff'
        if app_name not in apps_colors:
            apps_colors[app_name] = colors[0]
            colors.append(colors.pop(0))  # rotate palette
    try:
        # Leading segment: from the slot start to the first sample.
        time_large = data[0][4].split('.')[0]  # get time of 1st record
        time_small = low_time  # midnight
        diff = datetime.datetime.strptime(time_large, fmt) - datetime.datetime.strptime(time_small, fmt)  # big - small
        dt = datetime.datetime.strptime(str(diff), '%H:%M:%S')  # convert to H-M-S
        delta = datetime.timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second)  # covert to time delta
        midnight_minutes = delta.total_seconds() / 60  # convert to seconds then to minutes
        if midnight_minutes > 3.1:
            timeline.append(["Inactive", midnight_minutes])
        else:
            timeline.append([data[0][1], midnight_minutes])
    except (ValueError, IndexError):
        # No rows for the slot, or an unparsable timestamp.
        pass
    x = 1
    while x < len(data):
        try:
            # Minutes elapsed between the previous sample and this one.
            time_large = data[x][4].split('.')[0]  # get time of 2nd record
            time_small = data[x - 1][4].split('.')[0]  # get time of 1st record
            diff = datetime.datetime.strptime(time_large, fmt) - datetime.datetime.strptime(time_small,
                                                                                            fmt)  # big - small
            dt = datetime.datetime.strptime(str(diff), '%H:%M:%S')  # convert to H-M-S
            delta = datetime.timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second)  # covert to time delta
            usage_minutes = delta.total_seconds() / 60  # convert to seconds then to minutes
        except ValueError:
            # NOTE(review): ``diff``/``usage_minutes`` keep their
            # previous-iteration values here (unbound on a first-iteration
            # failure) — the code below still uses them.
            print('ValueError - Error calculating - Probably system time was changed')
            print("PREVIOUS - " + str(data[x - 1]))
            print("THIS - " + str(data[x]))
            pass
        if diff > idle_time:
            # Gap too large: extend or open an "Inactive" segment.
            try:
                if timeline[-1][0] == "Inactive":
                    timeline[-1][1] += usage_minutes
                else:
                    timeline.append(["Inactive", usage_minutes])
            except IndexError:
                # Timeline still empty.
                timeline.append(["Inactive", usage_minutes])
        else:
            try:
                # Attribute the elapsed time to the previously focused app.
                apps[data[x - 1][1]] += usage_minutes
                # apps[data[x - 1][1].split(' - ')[0]] += usage_minutes ======= this was here
                # if timeline[-1][0] == data[x - 1][1].split(' - ')[0]: ========== this also
                if timeline[-1][0] == data[x - 1][1]:
                    # Same app as the last segment: extend it, and record any
                    # browser window title (column 6) for browser apps.
                    timeline[-1][1] += usage_minutes
                    try:
                        if data[x - 1][1] in browsers:
                            # Window title format is "<page> - <browser>";
                            # drop the trailing browser-name part.
                            mid = data[x - 1][6].split('-')
                            if len(mid) > 1:
                                window = '-'.join(mid[:-1])
                            else:
                                window = mid[0]
                            if window not in browsers[data[x - 1][1]]:
                                browsers[data[x - 1][1]].append(window)
                                browsers_real[data[x - 1][1]].append(data[x - 1][4] + ' | ' + window)
                    except Exception as e:
                        print(e)
                else:
                    # timeline.append([data[x - 1][1].split(' - ')[0], usage_minutes]) ========== this tooo
                    timeline.append([data[x - 1][1], usage_minutes])
                    try:
                        if data[x - 1][1] in browsers:
                            mid = data[x - 1][6].split('-')
                            if len(mid) > 1:
                                window = '-'.join(mid[:-1])
                            else:
                                window = mid[0]
                            if window not in browsers[data[x - 1][1]]:
                                browsers[data[x - 1][1]].append(window)
                                browsers_real[data[x - 1][1]].append(data[x - 1][4] + ' | ' + window)
                    except Exception as e:
                        print(e)
            except ValueError:
                print('ValueError - Error calculating - Probably system time was changed')
                pass
            except IndexError:
                # timeline.append([data[x - 1][1].split(' - ')[0], usage_minutes]) =========== this as well
                timeline.append([data[x - 1][1], usage_minutes])
        x += 1
    try:
        # Trailing segment: from the last sample to the end of the slot.
        time_large = high_time  # get time of 2nd record
        time_small = data[-1][4].split('.')[0]  # get time of last record
        diff = datetime.datetime.strptime(time_large, fmt) - datetime.datetime.strptime(time_small, fmt)  # big - small
        dt = datetime.datetime.strptime(str(diff), '%H:%M:%S')  # convert to H-M-S
        delta = datetime.timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second)  # covert to time delta
        no_data_time = delta.total_seconds() / 60  # convert to seconds then to minutes
        if no_data_time > 3:
            if today == data[-1][3]:
                # Still today: more data may yet arrive.
                timeline.append(["No Data", no_data_time])
            else:
                timeline.append(["Inactive", no_data_time])
    except (ValueError, IndexError):
        print('ValueError - Error calculating - Probably system time was changed')
        pass
    # Map every timeline label to its assigned color.
    for record in timeline:
        timeline_colors.append(apps_colors[record[0]])
    return apps, timeline, timeline_colors, apps_colors
def get_hourly_keystrokes(db_path, date, slot):
    """Collect keystroke statistics for one hour of *date*.

    ``slot`` is a 1-based hour index (1 -> 00:00-01:00, ..., 24 ->
    23:00-24:00); any other value yields {}.

    Returns:
        dict mapping app name -> (element_id, concatenated content,
        total word count, total copy count, total paste count).
    """
    if slot not in range(1, 25):
        # Matches the old KeyError fallback for unknown slots.
        return {}
    # Hour boundaries derived arithmetically instead of a 24-entry table.
    low_time = '%02d:00:00' % (slot - 1)
    high_time = '%02d:00:00' % slot
    app_words = {}
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        # Parameterized values — the old string concatenation was injectable.
        c.execute(
            'SELECT * FROM key_logger_new WHERE t < time(?) AND t > time(?) AND d = ?',
            (high_time, low_time, date))
        data = c.fetchall()
    finally:
        conn.close()
    for row in data:
        app = str(row[2])
        word_count = int(row[6])
        content = row[5]
        # Renamed from ``copy``/``paste``/``id`` to avoid shadowing builtins.
        copies = int(row[7])
        pastes = int(row[8])
        elem_id = "_".join(app.split(" "))
        if app in app_words:
            prev = app_words[app]
            app_words[app] = (elem_id, prev[1] + "\n" + content,
                              prev[2] + word_count, prev[3] + copies,
                              prev[4] + pastes)
        else:
            app_words[app] = (elem_id, content, word_count, copies, pastes)
    # Leftover debug print removed.
    return app_words
def get_hourly_urls(db_path, date, slot):
    """Return per-URL time spent during one hour of *date*.

    ``slot`` is a 1-based hour index (1 -> 00:00-01:00, ..., 24 ->
    23:00-24:00); any other value yields {}.  Only visits fully inside
    the slot (start after its lower bound AND end before its upper
    bound) are counted, matching the original query.
    """
    if slot not in range(1, 25):
        # Matches the old KeyError fallback for unknown slots.
        return {}
    # Hour boundaries derived arithmetically instead of a 24-entry table.
    low_time = '%02d:00:00' % (slot - 1)
    high_time = '%02d:00:00' % slot
    url = {}
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        # Parameterized values — the old string concatenation was injectable.
        c.execute(
            'SELECT url, t FROM url_data WHERE end_t < time(?) AND start_t > time(?) AND d = ?',
            (high_time, low_time, date))
        data = c.fetchall()
    finally:
        conn.close()
    for link, spent in data:
        if link in url:
            url[link] += spent
        else:
            url[link] = spent
    return url
def give_clipboard_data(db_path, date):
    """Return clipboard history for *date* grouped by application.

    Each application's entries are concatenated chronologically as
    "<time> ===================\\n<content>\\n\\n" chunks.

    Returns:
        list of (row_id, app_name, concatenated_entries) tuples, one per
        application, in first-seen order.
    """
    clipboard = {}
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        # Parameterized date — the old string concatenation was injectable.
        c.execute('SELECT * FROM clipboard_monitor WHERE d=? ORDER BY t', (date,))
        data = c.fetchall()
    finally:
        conn.close()
    for row in data:
        app = row[4]
        entry = row[2] + " ===================\n" + row[3] + "\n\n"
        clipboard[app] = clipboard.get(app, "") + entry
    # enumerate() replaces the manual row_id counter.
    return [(row_id, app, text)
            for row_id, (app, text) in enumerate(clipboard.items())]
def give_keylogger_data(db_path, date):
    """Return all raw ``key_logger`` rows recorded for *date*."""
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        # Parameterized query — the old string concatenation was injectable.
        c.execute('SELECT * FROM key_logger WHERE d=?', (date,))
        data = c.fetchall()
    finally:
        # The original leaked the connection; always release it.
        conn.close()
    return data
def give_network_data(db_path, date):
    """Return all raw ``network_monitor`` rows recorded for *date*."""
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        # Parameterized query — the old string concatenation was injectable.
        c.execute('SELECT * FROM network_monitor WHERE d=?', (date,))
        data = c.fetchall()
    finally:
        # The original leaked the connection; always release it.
        conn.close()
    return data
| 38.6
| 155
| 0.502737
| 3,409
| 27,406
| 3.949839
| 0.070402
| 0.044857
| 0.022726
| 0.017156
| 0.843149
| 0.824211
| 0.818641
| 0.811586
| 0.794876
| 0.788934
| 0
| 0.081784
| 0.332555
| 27,406
| 710
| 156
| 38.6
| 0.65433
| 0.073962
| 0
| 0.778151
| 0
| 0
| 0.146977
| 0.001659
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023529
| false
| 0.021849
| 0.003361
| 0
| 0.055462
| 0.033613
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
41f017b2da42e012b11e0cd13bf75bba7f4d2273
| 3,791
|
py
|
Python
|
hallo/test/modules/random/test_foof.py
|
SpangleLabs/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 1
|
2022-01-27T13:25:01.000Z
|
2022-01-27T13:25:01.000Z
|
hallo/test/modules/random/test_foof.py
|
joshcoales/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 75
|
2015-09-26T18:07:18.000Z
|
2022-01-04T07:15:11.000Z
|
hallo/test/modules/random/test_foof.py
|
SpangleLabs/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 1
|
2021-04-10T12:02:47.000Z
|
2021-04-10T12:02:47.000Z
|
from datetime import datetime
from hallo.events import EventMessage
def test_short_doof(mock_roller, hallo_getter):
    """Rolls 0-20 should always produce the short "doof" response."""
    hallo = hallo_getter({"random"})
    for roll in range(21):
        # Force the dice roller to a fixed value.
        mock_roller.answer = roll
        message = EventMessage(hallo.test_server, None, hallo.test_user, "fooooooof")
        hallo.function_dispatcher.dispatch(message)
        reply = hallo.test_server.get_send_data(1, hallo.test_user, EventMessage)
        assert "doof" == reply[0].text.lower(), "Should be short doof."
def test_medium_doof(mock_roller, hallo_getter):
    """Rolls 21-40 should produce the medium "doooooof" response."""
    hallo = hallo_getter({"random"})
    for roll in range(21, 41):
        # Force the dice roller to a fixed value.
        mock_roller.answer = roll
        message = EventMessage(hallo.test_server, None, hallo.test_user, "foof")
        hallo.function_dispatcher.dispatch(message)
        reply = hallo.test_server.get_send_data(1, hallo.test_user, EventMessage)
        assert "doooooof" == reply[0].text.lower(), "Should be medium doof."
def test_long_doof(mock_roller, hallo_getter):
    """Rolls 41-59 (bar 55) should produce the long doof response."""
    hallo = hallo_getter({"random"})
    for roll in range(41, 60):
        if roll == 40 + 15:
            # Roll 55 triggers the mega doof; covered by test_mega_doof.
            continue
        # Force the dice roller to a fixed value.
        mock_roller.answer = roll
        message = EventMessage(hallo.test_server, None, hallo.test_user, "foooof")
        hallo.function_dispatcher.dispatch(message)
        reply = hallo.test_server.get_send_data(1, hallo.test_user, EventMessage)
        assert (
            "ddddoooooooooooooooooooooffffffffff." == reply[0].text.lower()
        ), "Should be long doof."
def test_mega_doof(mock_roller, hallo_getter):
    """Roll 55 powers up, pauses, then emits an extra-long doof."""
    hallo = hallo_getter({"random"})
    # Force the easter-egg roll.
    mock_roller.answer = 55
    started = datetime.now()
    hallo.function_dispatcher.dispatch(
        EventMessage(hallo.test_server, None, hallo.test_user, "foof")
    )
    finished = datetime.now()
    replies = hallo.test_server.get_send_data(2, hallo.test_user, EventMessage)
    assert "powering up..." == replies[0].text.lower(), "Should have powered up."
    assert (
        finished - started
    ).seconds > 3, "Should have had a delay between powering up and mega doof."
    assert len(replies[1].text.lower()) > 1000, "doof should be extra long."
    assert "!" in replies[1].text, "doof should have exclamation mark."
def test_passive_foof(mock_roller, hallo_getter):
    """A channel "foof" dispatched passively still gets a doof reply."""
    hallo = hallo_getter({"random"})
    mock_roller.answer = 0
    message = EventMessage(hallo.test_server, hallo.test_chan, hallo.test_user, "foof")
    hallo.function_dispatcher.dispatch_passive(message)
    reply = hallo.test_server.get_send_data(1, hallo.test_chan, EventMessage)
    assert "doof" == reply[0].text.lower(), "Should be short doof."
def test_passive_foof_exclamation(mock_roller, hallo_getter):
    """A trailing exclamation mark must not stop the passive match."""
    hallo = hallo_getter({"random"})
    mock_roller.answer = 0
    message = EventMessage(hallo.test_server, hallo.test_chan, hallo.test_user, "foof!")
    hallo.function_dispatcher.dispatch_passive(message)
    reply = hallo.test_server.get_send_data(1, hallo.test_chan, EventMessage)
    assert "doof" == reply[0].text.lower(), "Should be short doof."
def test_passive_long_foof(mock_roller, hallo_getter):
    """An elongated "foooo...f" must still match passively."""
    hallo = hallo_getter({"random"})
    mock_roller.answer = 0
    message = EventMessage(
        hallo.test_server, hallo.test_chan, hallo.test_user, "foooooooooooooooof"
    )
    hallo.function_dispatcher.dispatch_passive(message)
    reply = hallo.test_server.get_send_data(1, hallo.test_chan, EventMessage)
    assert "doof" == reply[0].text.lower(), "Should be short doof."
| 38.683673
| 100
| 0.684516
| 505
| 3,791
| 4.845545
| 0.154455
| 0.165509
| 0.164691
| 0.108705
| 0.800981
| 0.783817
| 0.751532
| 0.751532
| 0.737638
| 0.737638
| 0
| 0.013311
| 0.207333
| 3,791
| 97
| 101
| 39.082474
| 0.800998
| 0.014508
| 0
| 0.459459
| 0
| 0
| 0.116448
| 0.009659
| 0
| 0
| 0
| 0
| 0.135135
| 1
| 0.094595
| false
| 0.081081
| 0.027027
| 0
| 0.121622
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
41f6e2e1295800b9e12bfe74a8708f495035977f
| 3,808
|
py
|
Python
|
executor/tests/database/test_user.py
|
Ganyuhao/executor
|
8f47e0bdccfe476373487e00be2b6e95885045d9
|
[
"MIT"
] | null | null | null |
executor/tests/database/test_user.py
|
Ganyuhao/executor
|
8f47e0bdccfe476373487e00be2b6e95885045d9
|
[
"MIT"
] | null | null | null |
executor/tests/database/test_user.py
|
Ganyuhao/executor
|
8f47e0bdccfe476373487e00be2b6e95885045d9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""user表测试"""
from executor.database.models.user import Users
from executor.tests.database.base import DatabaseTestCase
from executor.exceptions import UserAlreadyExistException, \
IncorrectPasswordException
class TestOperatorUser(DatabaseTestCase):
    """Exercises user creation, lookup and deletion in the database layer."""

    data_file_path = "database_user_data.yaml"

    def test_create_user(self):
        """A user built from fixture data can be created and removed."""
        candidate = Users.from_json(self.get_test_date("test_create_user"))
        created = self.db.create_user(self.context, candidate)
        self.assertIsInstance(created, Users)
        self.db.delete_user(self.context, created.id, created.password)

    def test_create_same_name_user(self):
        """Creating a second user with an existing name must fail."""
        original = Users.from_json(
            self.get_test_date(
                "test_create_same_name_user", "test_create_same_name_user1"))
        duplicate = Users.from_json(
            self.get_test_date(
                "test_create_same_name_user", "test_create_same_name_user2"))
        self.db.create_user(self.context, original)
        self.assertRaises(UserAlreadyExistException,
                          self.db.create_user, self.context, duplicate)
        self.db.delete_user(self.context, original.phone, original.password)

    def test_create_same_phone_user(self):
        """Creating a second user with an existing phone must fail."""
        original = Users.from_json(
            self.get_test_date(
                "test_create_same_phone_user", "test_create_same_phone_user1"))
        duplicate = Users.from_json(
            self.get_test_date(
                "test_create_same_phone_user", "test_create_same_phone_user2"))
        self.db.create_user(self.context, original)
        self.assertRaises(UserAlreadyExistException,
                          self.db.create_user, self.context, duplicate)
        self.db.delete_user(self.context, original.phone, original.password)

    def test_get_user_by_id(self):
        """A created user can be fetched back by primary id."""
        source = Users.from_json(self.get_test_date("test_get_user_by_id"))
        stored = self.db.create_user(self.context, source)
        fetched = self.db.get_user(self.context, source.id, source.password)
        self.assertEqual(stored, fetched)
        self.db.delete_user(self.context, stored.id, stored.password)

    def test_get_user_by_user_id(self):
        """A created user can be fetched back by its user_id."""
        source = Users.from_json(self.get_test_date("test_get_user_by_user_id"))
        stored = self.db.create_user(self.context, source)
        fetched = self.db.get_user(self.context, source.user_id, source.password)
        self.assertEqual(stored, fetched)
        self.db.delete_user(self.context, stored.user_id, stored.password)

    def test_get_user_by_name(self):
        """A created user can be fetched back by username."""
        source = Users.from_json(self.get_test_date("test_get_user_by_name"))
        stored = self.db.create_user(self.context, source)
        fetched = self.db.get_user(self.context, source.username, source.password)
        self.assertEqual(stored, fetched)
        self.db.delete_user(self.context, stored.username, stored.password)

    def test_get_user_by_phone(self):
        """A created user can be fetched back by phone number."""
        source = Users.from_json(self.get_test_date("test_get_user_by_phone"))
        stored = self.db.create_user(self.context, source)
        fetched = self.db.get_user(self.context, source.phone, source.password)
        self.assertEqual(stored, fetched)
        self.db.delete_user(self.context, stored.phone, stored.password)

    def test_get_user_with_incorrect_password(self):
        """Fetching with a wrong password raises IncorrectPasswordException."""
        source = Users.from_json(
            self.get_test_date("test_get_user_with_incorrect_password"))
        stored = self.db.create_user(self.context, source)
        self.assertRaises(
            IncorrectPasswordException,
            self.db.get_user, self.context, stored.phone,
            stored.password + "_"
        )
        self.db.delete_user(self.context, stored.id, stored.password)
| 39.666667
| 77
| 0.654674
| 502
| 3,808
| 4.621514
| 0.105578
| 0.144828
| 0.148707
| 0.073276
| 0.828017
| 0.801293
| 0.760345
| 0.75431
| 0.74181
| 0.703448
| 0
| 0.005923
| 0.246324
| 3,808
| 95
| 78
| 40.084211
| 0.802439
| 0.01313
| 0
| 0.463415
| 0
| 0
| 0.10104
| 0.091442
| 0
| 0
| 0
| 0
| 0.097561
| 1
| 0.097561
| false
| 0.207317
| 0.036585
| 0
| 0.158537
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
41fcc1fba379ed10509cd06ff4c5edae8847cc9d
| 278,516
|
py
|
Python
|
back/perturbtrafic_api/views/default.py
|
kalbermattenm/perturbtrafic
|
9a6a05ea3eec346e7252fa6172701208a6a53789
|
[
"BSD-3-Clause"
] | null | null | null |
back/perturbtrafic_api/views/default.py
|
kalbermattenm/perturbtrafic
|
9a6a05ea3eec346e7252fa6172701208a6a53789
|
[
"BSD-3-Clause"
] | null | null | null |
back/perturbtrafic_api/views/default.py
|
kalbermattenm/perturbtrafic
|
9a6a05ea3eec346e7252fa6172701208a6a53789
|
[
"BSD-3-Clause"
] | null | null | null |
from pyramid.view import view_config
from pyramid.response import Response
from sqlalchemy import exc, func
from sqlalchemy import *
from sqlalchemy.schema import Sequence
from .. import models
from ..scripts.wfs_query import WFSQuery
from ..scripts.ldap_query import LDAPQuery
from ..scripts.utils import Utils
from ..scripts.evenements_xml import EvenementXML
from ..scripts.pt_mailer import PTMailer
from ..exceptions.custom_error import CustomError
from datetime import datetime, date, timedelta
import transaction
import json
import requests
import logging
import datetime
from pyramid.httpexceptions import HTTPFound, HTTPForbidden
# Module-wide logger for the view callables below.
log = logging.getLogger(__name__)
# Canned error messages reused in the JSON error responses.
# NOTE: the "occured" typo is kept — this is a runtime string clients may match.
general_exception = 'An error occured while executing the query'
id_not_found_exception = 'Id not found'
user_not_found_exception = 'User not found'
not_authorized_exception = 'Not authorized'
########################################################
# Home view
########################################################
@view_config(route_name='home', renderer='../templates/home.jinja2')
@view_config(route_name='home_slash', renderer='../templates/home.jinja2')
def home_view(request):
    """Render the home template with an empty context."""
    return {}
########################################################
# Type evenement by id view
########################################################
@view_config(route_name='type_evenement_by_id', request_method='GET', renderer='json')
def type_evenement_by_id_view(request):
    """Return the single TypeEvenement matching the ``id`` path segment."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        record_id = request.matchdict['id']
        result = request.dbsession.query(models.TypeEvenement) \
            .filter(models.TypeEvenement.id == record_id).first()
        if not result:
            raise Exception(id_not_found_exception)
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return result
########################################################
# Types evenements view
########################################################
@view_config(route_name='types_evenements', request_method='GET', renderer='json')
@view_config(route_name='types_evenements_slash', request_method='GET', renderer='json')
def types_evenements_view(request):
    """Return every TypeEvenement row as JSON."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        results = request.dbsession.query(models.TypeEvenement).all()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return results
########################################################
# Get evenement by id view
########################################################
@view_config(route_name='evenement_by_id', request_method='GET', renderer='json')
def get_evenement_by_id_view(request):
    """Return the Evenement matching the ``id`` path segment, formatted."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        evenement_id = request.matchdict['id']
        evenement = request.dbsession.query(models.Evenement) \
            .filter(models.Evenement.id == evenement_id).first()
        if not evenement:
            raise Exception(id_not_found_exception)
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return evenement.format()
########################################################
# Delete evenement by id view
########################################################
@view_config(route_name='evenement_by_id', request_method='DELETE', renderer='json')
def delete_evenement_by_id_view(request):
    """Soft-delete an evenement (and its perturbations) by path id.

    Sets ``date_suppression`` instead of removing rows.  Requires a valid
    auth cookie and delete permission; otherwise a 403 is raised.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        id = request.matchdict['id']
        # Check authorization
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        current_user_id = int(current_user_id) if current_user_id else None
        # Check if the user has permission to delete evenement
        user_can_delete_evenement = Utils.user_can_delete_evenement(request, current_user_id, id)
        if not user_can_delete_evenement:
            raise HTTPForbidden()
        # Evenement
        query = request.dbsession.query(models.Evenement)
        evenement = query.filter(models.Evenement.id == id).first()
        # Related perturbation
        query_p = request.dbsession.query(models.Perturbation)
        perturbations = query_p.filter(models.Perturbation.id_evenement == id).all()
        if not evenement:
            raise Exception(id_not_found_exception)
        # Soft delete: stamp the suppression date on the event and every
        # related perturbation inside one transaction.
        with transaction.manager:
            evenement.date_suppression = func.now()
            for p in perturbations:
                p.date_suppression = func.now()
            # Commit transaction
            transaction.commit()
    except HTTPForbidden as e:
        # Re-raise so Pyramid returns a real 403 instead of a JSON error body.
        raise HTTPForbidden()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Evenements view
########################################################
@view_config(route_name='evenements', request_method='GET', renderer='json')
@view_config(route_name='evenements_slash', request_method='GET', renderer='json')
def evenements_view(request):
    """Return all Evenement rows, formatted for JSON output."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        rows = request.dbsession.query(models.Evenement).all()
        formatted = [evenement.format() for evenement in rows]
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return formatted
########################################################
# Libeles evenements view
########################################################
@view_config(route_name='libelles_evenements', request_method='GET', renderer='json')
@view_config(route_name='libelles_evenements_slash', request_method='GET', renderer='json')
def libelles_evenements_view(request):
    """Return the perturbations the connected user may add for an entity."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        # Optional entity filter from the query string.
        id_entite = request.params['idEntite'] if 'idEntite' in request.params else None
        model = models.PerturbationPourUtilisateurAjout
        results = request.dbsession.query(model) \
            .filter(model.id_utilisateur == current_user_id) \
            .filter(model.id_entite == id_entite).all()
    except Exception as e:
        # Note: HTTPForbidden is also funneled here (JSON error), matching
        # the original behavior of this view.
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return results
########################################################
# Evenement arrivant à échéance view
########################################################
@view_config(route_name='evenements_echeance', request_method='GET', renderer='json')
@view_config(route_name='evenements_echeance_slash', request_method='GET', renderer='json')
def evenements_echeance_view(request):
    """Return the EvenementEcheance rows (evenements nearing their deadline)
    for the connected user, optionally filtered by ``idEntite``.

    Raises HTTPForbidden (403) when no ``auth_tkt`` cookie is present.
    Returns the standard 500 error dict on any other failure.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        # Optional entity filter from the query string
        id_entite = request.params['idEntite'] if 'idEntite' in request.params else None
        rows = request.dbsession.query(models.EvenementEcheance).filter(
            models.EvenementEcheance.id_utilisateur == current_user_id).filter(
            models.EvenementEcheance.id_entite == id_entite).all()
        evenements_array = [row.format() for row in rows]
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return evenements_array
########################################################
# Evenement impression by id
########################################################
@view_config(route_name='evenement_impression_by_id', request_method='GET', renderer='json')
def evenement_impression_by_id_view(request):
    """Return the formatted EvenementImpression row matching the ``id``
    route segment.

    Returns an error dict with ``id_not_found_exception`` when no row
    matches, or the generic 500 error dict on any other failure.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        id = request.matchdict['id']
        record = request.dbsession.query(models.EvenementImpression).filter(
            models.EvenementImpression.id == id).first()
        if not record:
            raise Exception(id_not_found_exception)
    except Exception as e:
        log.error(str(e))
        message = id_not_found_exception if str(e) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return record.format()
########################################################
# Evenement_perturbations impression by id
########################################################
@view_config(route_name='evenement_perturbations_impression_by_id', request_method='GET', renderer='json')
def evenement_perturbations_impression_by_id_view(request):
    """Return the formatted EvenementImpression for the ``id`` route segment
    together with the formatted PerturbationImpression rows of its
    perturbations.

    Returns an error dict with ``id_not_found_exception`` when the
    evenement does not exist, or the generic 500 error dict otherwise.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        id = request.matchdict['id']
        query_evenement = request.dbsession.query(models.EvenementImpression).filter(models.EvenementImpression.id == id).first()
        if not query_evenement:
            raise Exception(id_not_found_exception)
        # Get perturbations related to the evenement
        perturbations_impression = []
        query_perturbations = request.dbsession.query(models.Perturbation).filter(models.Perturbation.id_evenement == id).all()
        perturbations_ids = [item.id for item in query_perturbations]
        # Only run the IN(...) query when there are ids: an empty IN clause
        # is always false and makes SQLAlchemy emit a warning.
        if perturbations_ids:
            query_perturbations_impression = request.dbsession.query(models.PerturbationImpression).filter(models.PerturbationImpression.id.in_(perturbations_ids)).all()
            for item in query_perturbations_impression:
                perturbations_impression.append(item.format())
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'evenement': query_evenement.format(), 'perturbations': perturbations_impression}
########################################################
# Get Evenement edition by id view
########################################################
@view_config(route_name='evenement_edition_by_id', request_method='GET', renderer='json')
def evenement_edition_by_id_view(request):
    """Return one Evenement (by the ``id`` route segment) with everything the
    edition screen needs: type-specific info, categories/plan types,
    geometries (as GeoJSON strings), reperages, and the add/modify user names.

    Raises HTTPForbidden (403) when there is no ``auth_tkt`` cookie or the
    user may not read this evenement. Returns an error dict with
    ``id_not_found_exception`` when the id does not exist, or the generic
    500 error dict on any other failure.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        id = request.matchdict['id']
        # Check authorization
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        # Check if the user has permission to read this evenement
        user_can_read_evenement = Utils.user_can_read_evenement(request, current_user_id, id)
        if not user_can_read_evenement:
            raise HTTPForbidden()
        related_type = None
        categories_chantiers = []
        plans_types_fouille = []
        # Evenement
        query_evenement = request.dbsession.query(models.Evenement)
        evenement = query_evenement.filter(models.Evenement.id == id).first()
        # Bug fix: the existence check must come BEFORE dereferencing
        # evenement attributes; previously a missing id raised
        # AttributeError on None and reported the generic error instead
        # of id_not_found_exception.
        if not evenement:
            raise Exception(id_not_found_exception)
        # Utilisateur ajout
        contact_utilisateur_ajout = request.dbsession.query(models.Contact).filter(
            models.Contact.id == evenement.id_utilisateur_ajout).first()
        # Utilisateur modification
        contact_utilisateur_modification = request.dbsession.query(models.Contact).filter(
            models.Contact.id == evenement.id_utilisateur_modification).first()
        # Type evenement : autre
        if evenement.type == int(settings['autre_evenement_id']):
            query = request.dbsession.query(models.AutreEvenement)
            related_type = query.filter(models.AutreEvenement.id_evenement == id).first()
        # Type evenement : Chantier
        elif evenement.type == int(settings['chantier_evenement_id']):
            query = request.dbsession.query(models.Chantier)
            related_type = query.filter(models.Chantier.id_evenement == id).first()
            # Categories chantier linked through LienChantierCategorieChantier
            for lcc, cc in request.dbsession.query(models.LienChantierCategorieChantier,
                                                   models.CategorieChantier).filter(
                models.LienChantierCategorieChantier.id_chantier == related_type.id).filter(
                models.CategorieChantier.id == models.LienChantierCategorieChantier.categorie).all():
                categories_chantiers.append(cc)
        # Type evenement : Fouille
        elif evenement.type == int(settings['fouille_evenement_id']):
            query = request.dbsession.query(models.Fouille)
            related_type = query.filter(models.Fouille.id_evenement == id).first()
            # Plan types linked through LienFouillePlanType
            for lfp, pf in request.dbsession.query(models.LienFouillePlanType,
                                                   models.PlanTypeFouille).filter(
                models.LienFouillePlanType.id_evenement == id).filter(
                models.PlanTypeFouille.id == models.LienFouillePlanType.id_plan_type).all():
                plans_types_fouille.append(pf)
        # Type evenement : Manifestation
        elif evenement.type == int(settings['manifestation_evenement_id']):
            query = request.dbsession.query(models.Manifestation)
            related_type = query.filter(models.Manifestation.id_evenement == id).first()
        # Geometries (points, lines, polygons) serialized as GeoJSON strings
        geometries_array = []
        query_geom_point = request.dbsession.query(models.EvenementPoint.id,
                                                   func.public.ST_AsGeoJSON(models.EvenementPoint.geometry).label(
                                                       "geometry")).filter(
            models.EvenementPoint.id_evenement == id).all()
        query_geom_ligne = request.dbsession.query(models.EvenementLigne.id,
                                                   func.public.ST_AsGeoJSON(models.EvenementLigne.geometry).label(
                                                       "geometry")).filter(
            models.EvenementLigne.id_evenement == id).all()
        query_geom_polygone = request.dbsession.query(models.EvenementPolygone.id,
                                                      func.public.ST_AsGeoJSON(models.EvenementPolygone.geometry).label(
                                                          "geometry")).filter(
            models.EvenementPolygone.id_evenement == id).all()
        for item in query_geom_point + query_geom_ligne + query_geom_polygone:
            geometries_array.append({'id': item.id, 'geometry': item.geometry})
        # Reperage: only line geometries can carry reperages
        reperages = []
        evenement_lignes_ids = []
        for item in query_geom_ligne:
            evenement_lignes_ids.append(item.id)
        if len(evenement_lignes_ids) > 0:
            query_reperage = request.dbsession.query(models.Reperage).filter(
                models.Reperage.id_evenement_ligne.in_(evenement_lignes_ids)).all()
            if query_reperage:
                for item in query_reperage:
                    reperages.append(item.format())
        # Format evenement and attach human-readable user names
        evenement = evenement.format()
        if contact_utilisateur_ajout:
            evenement[
                'nom_utilisateur_ajout'] = contact_utilisateur_ajout.prenom + ' ' + contact_utilisateur_ajout.nom
        if contact_utilisateur_modification:
            evenement[
                'nom_utilisateur_modification'] = contact_utilisateur_modification.prenom + ' ' + contact_utilisateur_modification.nom
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'evenement': evenement, 'reperages': reperages,
            'infos': {} if not related_type else related_type.format(), 'categories_chantiers': categories_chantiers, 'plans_types_fouille': plans_types_fouille, 'geometries': geometries_array}
########################################################
# Add Evenement edition
########################################################
@view_config(route_name='evenement_edition', request_method='POST', renderer='json')
@view_config(route_name='evenement_edition_slash', request_method='POST', renderer='json')
def add_evenement_edition(request):
try:
settings = request.registry.settings
request.dbsession.execute('set search_path to ' + settings['schema_name'])
# Check authorization
auth_tkt = request.cookies.get('auth_tkt', default=None)
if not auth_tkt:
raise HTTPForbidden()
current_user_id = Utils.get_connected_user_id(request)
current_user_id = int(current_user_id) if current_user_id else None
# Check if the user has permission to add evenement
user_can_add_evenement = Utils.user_can_add_evenement(request, current_user_id)
if not user_can_add_evenement:
raise HTTPForbidden()
max_event_id = None
categories_array = []
plan_types_array = []
# Default params value
"""Evenement"""
idEntite = None
idResponsable = None
idRequerant = None
type = None
numeroDossier = None
division = None
libelle = None
description = None
prevision = None
#urgence = None
dateDebut = None
heureDebut = None
dateFin = None
heureFin = None
localisation = None
# localite = None
# lieuDit = None
# reperageEffectif = None
nomRequerant = None
rueRequerant = None
localiteRequerant = None
telephoneRequerant = None
faxRequerant = None
courrielRequerant = None
nomContact = None
prenomContact = None
mobileContact = None
telephoneContact = None
faxContact = None
courrielContact = None
remarque = None
dateDemande = None
dateOctroi = None
#ajoutePar = None
# dateAjout = None
# modifiePar = None
# dateModification = None
# dateSuppression = None
# Geometries reperages
geometries_reperages = None
"""Autre evenement"""
_idMaitreOuvrage = None
_idDirectionLocale = None
_idEntrepreneur = None
_idResponsableTravaux = None
_cause = None
_nomMaitreOuvrage = None
_rueMaitreOuvrage = None
_localiteMaitreOuvrage = None
_telephoneMaitreOuvrage = None
_faxMaitreOuvrage = None
_courrielMaitreOuvrage = None
_nomDirectionLocale = None
_prenomDirectionLocale = None
_mobileDirectionLocale = None
_telephoneDirectionLocale = None
_faxDirectionLocale = None
_courrielDirectionLocale = None
_nomEntrepreneur = None
_rueEntrepreneur = None
_localiteEntrepreneur = None
_telephoneEntrepreneur = None
_faxEntrepreneur = None
_courrielEntrepreneur = None
_nomResponsableTravaux = None
_prenomResponsableTravaux = None
_mobileResponsableTravaux = None
_telephoneResponsableTravaux = None
_faxResponsableTravaux = None
_courrielResponsableTravaux = None
_facturation = None
# _coordonnesX = None
# _coordonnesY = None
# _commune = None
# _cadastre = None
# _bienFonds = None
# _autre_cadastre = None
# _autre_bienFonds = None
# _lieuDit = None
_dateDebutValide = None
_dateFinValide = None
_dateMajValide = None
_numeroFacture = None
_dateFacture = None
_reserveEventuelle = None
"""Chantier"""
_idMaitreOuvrage = None
_idDirectionLocale = None
_idEntrepreneur = None
_idResponsableTravaux = None
_projet = None
_longueurEtape = None
_surface = None
_idCentraleEnrobage = None
_epaisseurCaisson = None
_qualiteCaisson = None
_epaisseurSupport = None
_qualiteSupport = None
_epaisseurRevetement = None
_qualiteRevetement = None
_qualiteEncollage = None
_boucleInduction = None
_faucherAccotement = None
_curerDepotoirs = None
_nettoyer_bords = None
_colmater_fissure = None
_prTouches = None
_autre = None
_lieuSeance = None
_jourSeance = None
_heureSeance = None
_categories = None
_reperageEffectif = None
"""Fouille"""
_idMaitreOuvrage = None
_idDirectionLocale = None
_idEntrepreneur = None
_idResponsableTravaux = None
_nomMaitreOuvrage = None
_rueMaitreOuvrage = None
_localiteMaitreOuvrage = None
_telephoneMaitreOuvrage = None
_faxMaitreOuvrage = None
_courrielMaitreOuvrage = None
_nomDirectionLocale = None
_prenomDirectionLocale = None
_mobileDirectionLocale = None
_telephoneDirectionLocale = None
_faxDirectionLocale = None
_courrielDirectionLocale = None
_nomEntrepreneur = None
_rueEntrepreneur = None
_localiteEntrepreneur = None
_telephoneEntrepreneur = None
_faxEntrepreneur = None
_courrielEntrepreneur = None
_nomResponsableTravaux = None
_prenomResponsableTravaux = None
_mobileResponsableTravaux = None
_telephoneResponsableTravaux = None
_faxResponsableTravaux = None
_courrielResponsableTravaux = None
_facturation = None
'''
_coordonnesX = None
_coordonnesY = None
_commune = None
_cadastre = None
_bienFonds = None
_autreCadastre = None
_autreBienFonds = None
_lieuDit = None
'''
_prTouches = None
_longueurEtape = None
_epaisseurCaisson = None
_qualiteCaisson = None
_epaisseurSupport = None
_qualiteSupport = None
_epaisseurRevetement = None
_qualiteRevetement = None
_qualiteEncollage = None
_dateDebutValide = None
_dateFinValide = None
_dateMajValide = None
_numeroFacture = None
_dateFacture = None
_reserveEventuelle = None
_planTypes = None
_reperageEffectif = None
"""Manifestation"""
_parcours = None
# Read params evenement
if 'idEntite' in request.params:
idEntite = request.params['idEntite']
if 'idResponsable' in request.params:
idResponsable = request.params['idResponsable']
if 'idRequerant' in request.params:
idRequerant = request.params['idRequerant']
if 'type' in request.params:
type = request.params['type']
numeroDossier = Utils.generate_numero_dossier(request, type)
if 'division' in request.params:
division = request.params['division']
if 'libelle' in request.params:
libelle = request.params['libelle']
if 'description' in request.params:
description = request.params['description']
if 'prevision' in request.params:
prevision = request.params['prevision']
if prevision == 'true':
prevision = True
elif prevision == 'false':
prevision = False
else:
prevision = None
"""
if 'urgence' in request.params:
urgence = request.params['urgence']
if urgence == 'true':
urgence = True
elif urgence == 'false':
urgence = False
else:
urgence = None
"""
if 'dateDebut' in request.params:
dateDebut = request.params['dateDebut']
if 'heureDebut' in request.params:
heureDebut = request.params['heureDebut']
if 'dateFin' in request.params:
dateFin = request.params['dateFin']
if 'heureFin' in request.params:
heureFin = request.params['heureFin']
if 'localisation' in request.params:
localisation = request.params['localisation']
'''
if 'localite' in request.params:
localite = request.params['localite']
if 'lieuDit' in request.params:
lieuDit = request.params['lieuDit']
if 'reperageEffectif' in request.params:
reperageEffectif = request.params['reperageEffectif']
if reperageEffectif == 'true':
reperageEffectif = True
elif reperageEffectif == 'false':
reperageEffectif = False
else:
reperageEffectif = None
'''
if 'nomRequerant' in request.params:
nomRequerant = request.params['nomRequerant']
if 'rueRequerant' in request.params:
rueRequerant = request.params['rueRequerant']
if 'localiteRequerant' in request.params:
localiteRequerant = request.params['localiteRequerant']
if 'telephoneRequerant' in request.params:
telephoneRequerant = request.params['telephoneRequerant']
if 'faxRequerant' in request.params:
faxRequerant = request.params['faxRequerant']
if 'courrielRequerant' in request.params:
courrielRequerant = request.params['courrielRequerant']
if 'nomContact' in request.params:
nomContact = request.params['nomContact']
if 'prenomContact' in request.params:
prenomContact = request.params['prenomContact']
if 'mobileContact' in request.params:
mobileContact = request.params['mobileContact']
if 'telephoneContact' in request.params:
telephoneContact = request.params['telephoneContact']
if 'faxContact' in request.params:
faxContact = request.params['faxContact']
if 'courrielContact' in request.params:
courrielContact = request.params['courrielContact']
if 'remarque' in request.params:
remarque = request.params['remarque']
if 'dateDemande' in request.params:
dateDemande = request.params['dateDemande']
if 'dateOctroi' in request.params:
dateOctroi = request.params['dateOctroi']
"""
if 'ajoutePar' in request.params:
ajoutePar = request.params['ajoutePar']
if 'dateAjout' in request.params:
dateAjout = request.params['dateAjout']
if 'modifiePar' in request.params:
modifiePar = request.params['modifiePar']
if 'dateModification' in request.params:
dateModification = request.params['dateModification']
if 'dateSuppression' in request.params:
dateSuppression = request.params['dateSuppression']
"""
# Check date_debut, if less than 24h, urgence=true
"""
if not urgence and dateDebut != None and heureDebut != None:
date_time_str = str(dateDebut) + ' ' + str(heureDebut)
date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
now = datetime.datetime.now()
if date_time_obj >= now and date_time_obj <= now + timedelta(days=1):
urgence = True
"""
# Geometries_reperages
if 'geometries_reperages' in request.params:
geometries_reperages = request.params['geometries_reperages']
# Read params autre evenement
if '_idMaitreOuvrage' in request.params:
_idMaitreOuvrage = request.params['_idMaitreOuvrage']
if '_idDirectionLocale' in request.params:
_idDirectionLocale = request.params['_idDirectionLocale']
if '_idEntrepreneur' in request.params:
_idEntrepreneur = request.params['_idEntrepreneur']
if '_idResponsableTravaux' in request.params:
_idResponsableTravaux = request.params['_idResponsableTravaux']
if '_cause' in request.params:
_cause = request.params['_cause']
if '_nomMaitreOuvrage' in request.params:
_nomMaitreOuvrage = request.params['_nomMaitreOuvrage']
if '_rueMaitreOuvrage' in request.params:
_rueMaitreOuvrage = request.params['_rueMaitreOuvrage']
if '_localiteMaitreOuvrage' in request.params:
_localiteMaitreOuvrage = request.params['_localiteMaitreOuvrage']
if '_telephoneMaitreOuvrage' in request.params:
_telephoneMaitreOuvrage = request.params['_telephoneMaitreOuvrage']
if '_faxMaitreOuvrage' in request.params:
_faxMaitreOuvrage = request.params['_faxMaitreOuvrage']
if '_courrielMaitreOuvrage' in request.params:
_courrielMaitreOuvrage = request.params['_courrielMaitreOuvrage']
if '_nomDirectionLocale' in request.params:
_nomDirectionLocale = request.params['_nomDirectionLocale']
if '_prenomDirectionLocale' in request.params:
_prenomDirectionLocale = request.params['_prenomDirectionLocale']
if '_mobileDirectionLocale' in request.params:
_mobileDirectionLocale = request.params['_mobileDirectionLocale']
if '_telephoneDirectionLocale' in request.params:
_telephoneDirectionLocale = request.params['_telephoneDirectionLocale']
if '_faxDirectionLocale' in request.params:
_faxDirectionLocale = request.params['_faxDirectionLocale']
if '_courrielDirectionLocale' in request.params:
_courrielDirectionLocale = request.params['_courrielDirectionLocale']
if '_nomEntrepreneur' in request.params:
_nomEntrepreneur = request.params['_nomEntrepreneur']
if '_rueEntrepreneur' in request.params:
_rueEntrepreneur = request.params['_rueEntrepreneur']
if '_localiteEntrepreneur' in request.params:
_localiteEntrepreneur = request.params['_localiteEntrepreneur']
if '_telephoneEntrepreneur' in request.params:
_telephoneEntrepreneur = request.params['_telephoneEntrepreneur']
if '_faxEntrepreneur' in request.params:
_faxEntrepreneur = request.params['_faxEntrepreneur']
if '_courrielEntrepreneur' in request.params:
_courrielEntrepreneur = request.params['_courrielEntrepreneur']
if '_nomResponsableTravaux' in request.params:
_nomResponsableTravaux = request.params['_nomResponsableTravaux']
if '_prenomResponsableTravaux' in request.params:
_prenomResponsableTravaux = request.params['_prenomResponsableTravaux']
if '_mobileResponsableTravaux' in request.params:
_mobileResponsableTravaux = request.params['_mobileResponsableTravaux']
if '_telephoneResponsableTravaux' in request.params:
_telephoneResponsableTravaux = request.params['_telephoneResponsableTravaux']
if '_faxResponsableTravaux' in request.params:
_faxResponsableTravaux = request.params['_faxResponsableTravaux']
if '_courrielResponsableTravaux' in request.params:
_courrielResponsableTravaux = request.params['_courrielResponsableTravaux']
if '_facturation' in request.params:
_facturation = request.params['_facturation']
'''
if '_coordonnesX' in request.params:
_coordonnesX = request.params['_coordonnesX']
if '_coordonnesY' in request.params:
_coordonnesY = request.params['_coordonnesY']
if '_commune' in request.params:
_commune = request.params['_commune']
if '_cadastre' in request.params:
_cadastre = request.params['_cadastre']
if '_bienFonds' in request.params:
_bienFonds = request.params['_bienFonds']
if '_autre_cadastre' in request.params:
_autre_cadastre = request.params['_autre_cadastre']
if '_autre_bienFonds' in request.params:
_autre_bienFonds = request.params['_autre_bienFonds']
if '_lieuDit' in request.params:
_lieuDit = request.params['_lieuDit']
'''
if '_dateDebutValide' in request.params:
_dateDebutValide = request.params['_dateDebutValide']
if '_dateFinValide' in request.params:
_dateFinValide = request.params['_dateFinValide']
if '_dateMajValide' in request.params:
_dateMajValide = request.params['_dateMajValide']
if '_numeroFacture' in request.params:
_numeroFacture = request.params['_numeroFacture']
if '_dateFacture' in request.params:
_dateFacture = request.params['_dateFacture']
if '_reserveEventuelle' in request.params:
_reserveEventuelle = request.params['_reserveEventuelle']
# Read params chantier
if '_idMaitreOuvrage' in request.params:
_idMaitreOuvrage = request.params['_idMaitreOuvrage']
if '_idDirectionLocale' in request.params:
_idDirectionLocale = request.params['_idDirectionLocale']
if '_idEntrepreneur' in request.params:
_idEntrepreneur = request.params['_idEntrepreneur']
if '_idResponsableTravaux' in request.params:
_idResponsableTravaux = request.params['_idResponsableTravaux']
if '_projet' in request.params:
_projet = request.params['_projet']
if '_longueurEtape' in request.params:
_longueurEtape = request.params['_longueurEtape']
if '_surface' in request.params:
_surface = request.params['_surface']
if '_idCentraleEnrobage' in request.params:
_idCentraleEnrobage = request.params['_idCentraleEnrobage']
if '_epaisseurCaisson' in request.params:
_epaisseurCaisson = request.params['_epaisseurCaisson']
if '_qualiteCaisson' in request.params:
_qualiteCaisson = request.params['_qualiteCaisson']
if '_epaisseurSupport' in request.params:
_epaisseurSupport = request.params['_epaisseurSupport']
if '_qualiteSupport' in request.params:
_qualiteSupport = request.params['_qualiteSupport']
if '_epaisseurRevetement' in request.params:
_epaisseurRevetement = request.params['_epaisseurRevetement']
if '_qualiteRevetement' in request.params:
_qualiteRevetement = request.params['_qualiteRevetement']
if '_qualiteEncollage' in request.params:
_qualiteEncollage = request.params['_qualiteEncollage']
if '_boucleInduction' in request.params:
_boucleInduction = request.params['_boucleInduction']
if _boucleInduction == 'true':
_boucleInduction = True
elif _boucleInduction == 'false':
_boucleInduction = False
else:
_boucleInduction = None
if '_faucherAccotement' in request.params:
_faucherAccotement = request.params['_faucherAccotement']
if _faucherAccotement == 'true':
_faucherAccotement = True
elif _faucherAccotement == 'false':
_faucherAccotement = False
else:
_faucherAccotement = None
if '_curerDepotoirs' in request.params:
_curerDepotoirs = request.params['_curerDepotoirs']
if _curerDepotoirs == 'true':
_curerDepotoirs = True
elif _curerDepotoirs == 'false':
_curerDepotoirs = False
else:
_curerDepotoirs = None
if '_nettoyer_bords' in request.params:
_nettoyer_bords = request.params['_nettoyer_bords']
if _nettoyer_bords == 'true':
_nettoyer_bords = True
elif _nettoyer_bords == 'false':
_nettoyer_bords = False
else:
_nettoyer_bords = None
if '_colmater_fissure' in request.params:
_colmater_fissure = request.params['_colmater_fissure']
if _colmater_fissure == 'true':
_colmater_fissure = True
elif _colmater_fissure == 'false':
_colmater_fissure = False
else:
_colmater_fissure = None
if '_prTouches' in request.params:
_prTouches = request.params['_prTouches']
if _prTouches == 'true':
_prTouches = True
elif _prTouches == 'false':
_prTouches = False
else:
_prTouches = None
if '_autre' in request.params:
_autre = request.params['_autre']
if '_lieuSeance' in request.params:
_lieuSeance = request.params['_lieuSeance']
if '_jourSeance' in request.params:
_jourSeance = request.params['_jourSeance']
if '_heureSeance' in request.params:
_heureSeance = request.params['_heureSeance']
if '_categories' in request.params:
_categories = request.params['_categories']
if _categories:
categories_array = json.loads(_categories)
if '_reperageEffectif' in request.params:
_reperageEffectif = request.params['_reperageEffectif']
if _reperageEffectif == 'true':
_reperageEffectif = True
elif _reperageEffectif == 'false':
_reperageEffectif = False
else:
_reperageEffectif = None
# Read params fouille
if '_idMaitreOuvrage' in request.params:
_idMaitreOuvrage = request.params['_idMaitreOuvrage']
if '_idDirectionLocale' in request.params:
_idDirectionLocale = request.params['_idDirectionLocale']
if '_idEntrepreneur' in request.params:
_idEntrepreneur = request.params['_idEntrepreneur']
if '_idResponsableTravaux' in request.params:
_idResponsableTravaux = request.params['_idResponsableTravaux']
if '_nomMaitreOuvrage' in request.params:
_nomMaitreOuvrage = request.params['_nomMaitreOuvrage']
if '_rueMaitreOuvrage' in request.params:
_rueMaitreOuvrage = request.params['_rueMaitreOuvrage']
if '_localiteMaitreOuvrage' in request.params:
_localiteMaitreOuvrage = request.params['_localiteMaitreOuvrage']
if '_telephoneMaitreOuvrage' in request.params:
_telephoneMaitreOuvrage = request.params['_telephoneMaitreOuvrage']
if '_faxMaitreOuvrage' in request.params:
_faxMaitreOuvrage = request.params['_faxMaitreOuvrage']
if '_courrielMaitreOuvrage' in request.params:
_courrielMaitreOuvrage = request.params['_courrielMaitreOuvrage']
if '_nomDirectionLocale' in request.params:
_nomDirectionLocale = request.params['_nomDirectionLocale']
if '_prenomDirectionLocale' in request.params:
_prenomDirectionLocale = request.params['_prenomDirectionLocale']
if '_mobileDirectionLocale' in request.params:
_mobileDirectionLocale = request.params['_mobileDirectionLocale']
if '_telephoneDirectionLocale' in request.params:
_telephoneDirectionLocale = request.params['_telephoneDirectionLocale']
if '_faxDirectionLocale' in request.params:
_faxDirectionLocale = request.params['_faxDirectionLocale']
if '_courrielDirectionLocale' in request.params:
_courrielDirectionLocale = request.params['_courrielDirectionLocale']
if '_nomEntrepreneur' in request.params:
_nomEntrepreneur = request.params['_nomEntrepreneur']
if '_rueEntrepreneur' in request.params:
_rueEntrepreneur = request.params['_rueEntrepreneur']
if '_localiteEntrepreneur' in request.params:
_localiteEntrepreneur = request.params['_localiteEntrepreneur']
if '_telephoneEntrepreneur' in request.params:
_telephoneEntrepreneur = request.params['_telephoneEntrepreneur']
if '_faxEntrepreneur' in request.params:
_faxEntrepreneur = request.params['_faxEntrepreneur']
if '_courrielEntrepreneur' in request.params:
_courrielEntrepreneur = request.params['_courrielEntrepreneur']
if '_nomResponsableTravaux' in request.params:
_nomResponsableTravaux = request.params['_nomResponsableTravaux']
if '_prenomResponsableTravaux' in request.params:
_prenomResponsableTravaux = request.params['_prenomResponsableTravaux']
if '_mobileResponsableTravaux' in request.params:
_mobileResponsableTravaux = request.params['_mobileResponsableTravaux']
if '_telephoneResponsableTravaux' in request.params:
_telephoneResponsableTravaux = request.params['_telephoneResponsableTravaux']
if '_faxResponsableTravaux' in request.params:
_faxResponsableTravaux = request.params['_faxResponsableTravaux']
if '_courrielResponsableTravaux' in request.params:
_courrielResponsableTravaux = request.params['_courrielResponsableTravaux']
if '_facturation' in request.params:
_facturation = request.params['_facturation']
if '_coordonnesX' in request.params:
_coordonnesX = request.params['_coordonnesX']
if '_coordonnesY' in request.params:
_coordonnesY = request.params['_coordonnesY']
if '_commune' in request.params:
_commune = request.params['_commune']
if '_cadastre' in request.params:
_cadastre = request.params['_cadastre']
if '_bienFonds' in request.params:
_bienFonds = request.params['_bienFonds']
if '_autreCadastre' in request.params:
_autreCadastre = request.params['_autreCadastre']
if '_autreBienFonds' in request.params:
_autreBienFonds = request.params['_autreBienFonds']
if '_lieuDit' in request.params:
_lieuDit = request.params['_lieuDit']
if '_prTouches' in request.params:
_prTouches = request.params['_prTouches']
if _prTouches == 'true':
_prTouches = True
elif _prTouches == 'false':
_prTouches = False
else:
_prTouches = None
if '_longueurEtape' in request.params:
_longueurEtape = request.params['_longueurEtape']
if '_epaisseurCaisson' in request.params:
_epaisseurCaisson = request.params['_epaisseurCaisson']
if '_qualiteCaisson' in request.params:
_qualiteCaisson = request.params['_qualiteCaisson']
if '_epaisseurSupport' in request.params:
_epaisseurSupport = request.params['_epaisseurSupport']
if '_qualiteSupport' in request.params:
_qualiteSupport = request.params['_qualiteSupport']
if '_epaisseurRevetement' in request.params:
_epaisseurRevetement = request.params['_epaisseurRevetement']
if '_qualiteRevetement' in request.params:
_qualiteRevetement = request.params['_qualiteRevetement']
if '_qualiteEncollage' in request.params:
_qualiteEncollage = request.params['_qualiteEncollage']
if '_dateDebutValide' in request.params:
_dateDebutValide = request.params['_dateDebutValide']
if '_dateFinValide' in request.params:
_dateFinValide = request.params['_dateFinValide']
if '_dateMajValide' in request.params:
_dateMajValide = request.params['_dateMajValide']
if '_numeroFacture' in request.params:
_numeroFacture = request.params['_numeroFacture']
if '_dateFacture' in request.params:
_dateFacture = request.params['_dateFacture']
if '_reserveEventuelle' in request.params:
_reserveEventuelle = request.params['_reserveEventuelle']
if '_planTypes' in request.params:
_planTypes = request.params['_planTypes']
if _planTypes:
plan_types_array = json.loads(_planTypes)
if '_reperageEffectif' in request.params:
_reperageEffectif = request.params['_reperageEffectif']
if _reperageEffectif == 'true':
_reperageEffectif = True
elif _reperageEffectif == 'false':
_reperageEffectif = False
else:
_reperageEffectif = None
# Read params manifestation
if '_parcours' in request.params:
_parcours = request.params['_parcours']
with transaction.manager:
evenement_model = models.Evenement(
id_entite=idEntite,
id_responsable=idResponsable,
id_requerant=idRequerant,
type=type,
numero_dossier=numeroDossier,
division=division,
libelle=libelle,
description=description,
prevision=prevision,
#urgence=urgence,
date_debut=dateDebut,
heure_debut=heureDebut,
date_fin=dateFin,
heure_fin=heureFin,
localisation=localisation,
# localite=localite,
# lieu_dit=lieuDit,
# reperage_effectif=reperageEffectif,
nom_requerant=nomRequerant,
rue_requerant=rueRequerant,
localite_requerant=localiteRequerant,
telephone_requerant=telephoneRequerant,
fax_requerant=faxRequerant,
courriel_requerant=courrielRequerant,
nom_contact=nomContact,
prenom_contact=prenomContact,
mobile_contact=mobileContact,
telephone_contact=telephoneContact,
fax_contact=faxContact,
courriel_contact=courrielContact,
remarque=remarque,
date_demande=dateDemande,
date_octroi=dateOctroi,
id_utilisateur_ajout=current_user_id,
# date_ajout=dateAjout
id_utilisateur_modification=current_user_id
# date_modification=dateModification,
# date_suppression=dateSuppression
)
request.dbsession.add(evenement_model)
request.dbsession.flush()
max_event_id = evenement_model.id
# Related model
related_model = None
# evenement_point_model = None
# evenement_ligne_model = None
# evenement_polygone_model = None
# Type evenement : autre
if int(type) == int(settings['autre_evenement_id']):
related_model = models.AutreEvenement(
id_evenement=max_event_id,
id_maitre_ouvrage=_idMaitreOuvrage,
id_direction_locale=_idDirectionLocale,
id_entrepreneur=_idEntrepreneur,
id_responsable_travaux=_idResponsableTravaux,
cause=_cause,
nom_maitre_ouvrage=_nomMaitreOuvrage,
rue_maitre_ouvrage=_rueMaitreOuvrage,
localite_maitre_ouvrage=_localiteMaitreOuvrage,
telephone_maitre_ouvrage=_telephoneMaitreOuvrage,
fax_maitre_ouvrage=_faxMaitreOuvrage,
courriel_maitre_ouvrage=_courrielMaitreOuvrage,
nom_direction_locale=_nomDirectionLocale,
prenom_direction_locale=_prenomDirectionLocale,
mobile_direction_locale=_mobileDirectionLocale,
telephone_direction_locale=_telephoneDirectionLocale,
fax_direction_locale=_faxDirectionLocale,
courriel_direction_locale=_courrielDirectionLocale,
nom_entrepreneur=_nomEntrepreneur,
rue_entrepreneur=_rueEntrepreneur,
localite_entrepreneur=_localiteEntrepreneur,
telephone_entrepreneur=_telephoneEntrepreneur,
fax_entrepreneur=_faxEntrepreneur,
courriel_entrepreneur=_courrielEntrepreneur,
nom_responsable_travaux=_nomResponsableTravaux,
prenom_responsable_travaux=_prenomResponsableTravaux,
mobile_responsable_travaux=_mobileResponsableTravaux,
telephone_responsable_travaux=_telephoneResponsableTravaux,
fax_responsable_travaux=_faxResponsableTravaux,
courriel_responsable_travaux=_courrielResponsableTravaux,
facturation=_facturation,
# coordonnes_x=_coordonnesX,
# coordonnes_y=_coordonnesY,
# commune=_commune,
# cadastre=_cadastre,
# bien_fonds=_bienFonds,
# autre_cadastre=_autre_cadastre,
# autre_bien_fonds=_autre_bienFonds,
# lieu_dit=_lieuDit,
date_debut_valide=_dateDebutValide,
date_fin_valide=_dateFinValide,
date_maj_valide=_dateMajValide,
numero_facture=_numeroFacture,
date_facture=_dateFacture,
reserve_eventuelle=_reserveEventuelle
)
# Type evenement : Chantier
elif int(type) == int(settings['chantier_evenement_id']):
related_model = models.Chantier(
id_evenement=max_event_id,
id_maitre_ouvrage=_idMaitreOuvrage,
id_direction_locale=_idDirectionLocale,
id_entrepreneur=_idEntrepreneur,
id_responsable_travaux=_idResponsableTravaux,
projet=_projet,
longueur_etape=_longueurEtape,
surface=_surface,
id_centrale_enrobage=_idCentraleEnrobage,
epaisseur_caisson=_epaisseurCaisson,
qualite_caisson=_qualiteCaisson,
epaisseur_support=_epaisseurSupport,
qualite_support=_qualiteSupport,
epaisseur_revetement=_epaisseurRevetement,
qualite_revetement=_qualiteRevetement,
qualite_encollage=_qualiteEncollage,
boucle_induction=_boucleInduction,
faucher_accotement=_faucherAccotement,
curer_depotoirs=_curerDepotoirs,
nettoyer_bords=_nettoyer_bords,
colmater_fissure=_colmater_fissure,
pr_touches=_prTouches,
autre=_autre,
lieu_seance=_lieuSeance,
jour_seance=_jourSeance,
heure_seance=_heureSeance,
reperage_effectif=_reperageEffectif
)
# Type evenement : Fouille
elif int(type) == int(settings['fouille_evenement_id']):
related_model = models.Fouille(
id_evenement=max_event_id,
id_maitre_ouvrage=_idMaitreOuvrage,
id_direction_locale=_idDirectionLocale,
id_entrepreneur=_idEntrepreneur,
id_responsable_travaux=_idResponsableTravaux,
nom_maitre_ouvrage=_nomMaitreOuvrage,
rue_maitre_ouvrage=_rueMaitreOuvrage,
localite_maitre_ouvrage=_localiteMaitreOuvrage,
telephone_maitre_ouvrage=_telephoneMaitreOuvrage,
fax_maitre_ouvrage=_faxMaitreOuvrage,
courriel_maitre_ouvrage=_courrielMaitreOuvrage,
nom_direction_locale=_nomDirectionLocale,
prenom_direction_locale=_prenomDirectionLocale,
mobile_direction_locale=_mobileDirectionLocale,
telephone_direction_locale=_telephoneDirectionLocale,
fax_direction_locale=_faxDirectionLocale,
courriel_direction_locale=_courrielDirectionLocale,
nom_entrepreneur=_nomEntrepreneur,
rue_entrepreneur=_rueEntrepreneur,
localite_entrepreneur=_localiteEntrepreneur,
telephone_entrepreneur=_telephoneEntrepreneur,
fax_entrepreneur=_faxEntrepreneur,
courriel_entrepreneur=_courrielEntrepreneur,
nom_responsable_travaux=_nomResponsableTravaux,
prenom_responsable_travaux=_prenomResponsableTravaux,
mobile_responsable_travaux=_mobileResponsableTravaux,
telephone_responsable_travaux=_telephoneResponsableTravaux,
fax_responsable_travaux=_faxResponsableTravaux,
courriel_responsable_travaux=_courrielResponsableTravaux,
facturation=_facturation,
# coordonnes_x=_coordonnesX,
# coordonnes_y=_coordonnesY,
# commune=_commune,
# cadastre=_cadastre,
# bien_fonds=_bienFonds,
# autre_cadastre=_autreCadastre,
# autre_bien_fonds=_autreBienFonds,
# lieu_dit=_lieuDit,
pr_touches=_prTouches,
longueur_etape=_longueurEtape,
epaisseur_caisson=_epaisseurCaisson,
qualite_caisson=_qualiteCaisson,
epaisseur_support=_epaisseurSupport,
qualite_support=_qualiteSupport,
epaisseur_revetement=_epaisseurRevetement,
qualite_revetement=_qualiteRevetement,
qualite_encollage=_qualiteEncollage,
# plan_type=_planType,
date_debut_valide=_dateDebutValide,
date_fin_valide=_dateFinValide,
date_maj_valide=_dateMajValide,
numero_facture=_numeroFacture,
date_facture=_dateFacture,
reserve_eventuelle=_reserveEventuelle,
reperage_effectif=_reperageEffectif
)
# Type evenement : Manifestation
elif int(type) == int(settings['manifestation_evenement_id']):
related_model = models.Manifestation(
id_evenement=max_event_id,
parcours=_parcours)
# Geometries_reperages
if geometries_reperages != None:
json_geometries_reperages = json.loads(geometries_reperages)
for onegeojson in json_geometries_reperages:
# Geometry
if 'geometry' in onegeojson:
geometry = onegeojson['geometry']
if 'type' in geometry:
type_geom = geometry['type']
# Point
if type_geom == 'Point':
evenement_point_model = models.EvenementPoint(id_evenement=max_event_id)
evenement_point_model.set_json_geometry(str(geometry), settings['srid'])
request.dbsession.add(evenement_point_model)
# Line
elif type_geom == 'LineString' or type_geom == 'MultiLineString' or type_geom == 'GeometryCollection':
evenement_ligne_model = models.EvenementLigne(id_evenement=max_event_id)
evenement_ligne_model.set_json_geometry(str(geometry), settings['srid'])
request.dbsession.add(evenement_ligne_model)
if 'reperage' in onegeojson:
request.dbsession.flush()
reperage = onegeojson['reperage']
reperage_model = models.Reperage(
id_evenement_ligne=evenement_ligne_model.id,
id_deviation=reperage['idDeviation'],
proprietaire=reperage['proprietaire'],
axe=reperage['axe'],
sens=reperage['sens'],
pr_debut=reperage['prDebut'],
pr_debut_distance=reperage['prDebutDistance'],
pr_fin=reperage['prFin'],
pr_fin_distance=reperage['prFinDistance'],
ecartd=reperage['ecartd'],
ecartf=reperage['ecartf'],
usage_neg=reperage['usageNeg'],
f_surf=reperage['fSurf'],
f_long=reperage['fLong']
)
request.dbsession.add(reperage_model)
# Polygon
elif type_geom == 'Polygon':
evenement_polygon_model = models.EvenementPolygone(id_evenement=max_event_id)
evenement_polygon_model.set_json_geometry(str(geometry), settings['srid'])
request.dbsession.add(evenement_polygon_model)
# Commit transaction
request.dbsession.add(related_model)
request.dbsession.flush()
# Reperage
# Categories chantiers / plan type fouille
#with transaction.manager:
# Type evenement : Chantier
if int(type) == int(settings['chantier_evenement_id']):
if categories_array and len(categories_array) > 0:
for category_id in categories_array:
lien_categ_chant = models.LienChantierCategorieChantier(
id_chantier=related_model.id,
categorie=category_id
)
request.dbsession.add(lien_categ_chant)
# Type evenement : Fouille
elif int(type) == int(settings['fouille_evenement_id']):
if plan_types_array and len(plan_types_array) > 0:
for plan_type_id in plan_types_array:
lien_fouille_plan = models.LienFouillePlanType(
id_evenement=max_event_id,
id_plan_type=plan_type_id
)
request.dbsession.add(lien_fouille_plan)
#Commit transaction
transaction.commit()
except HTTPForbidden as e:
raise HTTPForbidden()
except Exception as e:
# transaction.abort()
request.dbsession.rollback()
log.error(str(e))
return {'error': 'true', 'code': 500, 'message': general_exception}
return {'id': max_event_id, 'message': 'Data successfully saved'}
########################################################
# Update Evenement edition
########################################################
@view_config(route_name='evenement_edition', request_method='PUT', renderer='json')
@view_config(route_name='evenement_edition_slash', request_method='PUT', renderer='json')
def update_evenement_edition(request):
try:
settings = request.registry.settings
request.dbsession.execute('set search_path to ' + settings['schema_name'])
# Check authorization
auth_tkt = request.cookies.get('auth_tkt', default=None)
if not auth_tkt:
raise HTTPForbidden()
current_user_id = Utils.get_connected_user_id(request)
current_user_id = int(current_user_id) if current_user_id else None
if current_user_id is None:
raise HTTPForbidden()
plan_types_array = []
categories_array = []
# Default params value
"""Evenement"""
idEvenement = None
idEntite = None
idResponsable = None
idRequerant = None
type = None
numeroDossier = None
division = None
libelle = None
description = None
prevision = None
#urgence = None
dateDebut = None
heureDebut = None
dateFin = None
heureFin = None
localisation = None
# localite = None
# lieuDit = None
# reperageEffectif = None
nomRequerant = None
rueRequerant = None
localiteRequerant = None
telephoneRequerant = None
faxRequerant = None
courrielRequerant = None
nomContact = None
prenomContact = None
mobileContact = None
telephoneContact = None
faxContact = None
courrielContact = None
remarque = None
dateDemande = None
dateOctroi = None
# ajoutePar = None
# dateAjout = None
#modifiePar = None
# dateModification = None
# dateSuppression = None
geometries_reperages = None
"""Autre evenement"""
_idMaitreOuvrage = None
_idDirectionLocale = None
_idEntrepreneur = None
_idResponsableTravaux = None
_cause = None
_nomMaitreOuvrage = None
_rueMaitreOuvrage = None
_localiteMaitreOuvrage = None
_telephoneMaitreOuvrage = None
_faxMaitreOuvrage = None
_courrielMaitreOuvrage = None
_nomDirectionLocale = None
_prenomDirectionLocale = None
_mobileDirectionLocale = None
_telephoneDirectionLocale = None
_faxDirectionLocale = None
_courrielDirectionLocale = None
_nomEntrepreneur = None
_rueEntrepreneur = None
_localiteEntrepreneur = None
_telephoneEntrepreneur = None
_faxEntrepreneur = None
_courrielEntrepreneur = None
_nomResponsableTravaux = None
_prenomResponsableTravaux = None
_mobileResponsableTravaux = None
_telephoneResponsableTravaux = None
_faxResponsableTravaux = None
_courrielResponsableTravaux = None
_facturation = None
'''
_coordonnesX = None
_coordonnesY = None
_commune = None
_cadastre = None
_bienFonds = None
_autre_cadastre = None
_autre_bienFonds = None
_lieuDit = None
'''
_dateDebutValide = None
_dateFinValide = None
_dateMajValide = None
_numeroFacture = None
_dateFacture = None
_reserveEventuelle = None
"""Chantier"""
_idMaitreOuvrage = None
_idDirectionLocale = None
_idEntrepreneur = None
_idResponsableTravaux = None
_projet = None
_longueurEtape = None
_surface = None
_idCentraleEnrobage = None
_epaisseurCaisson = None
_qualiteCaisson = None
_epaisseurSupport = None
_qualiteSupport = None
_epaisseurRevetement = None
_qualiteRevetement = None
_qualiteEncollage = None
_boucleInduction = None
_faucherAccotement = None
_curerDepotoirs = None
_nettoyer_bords = None
_colmater_fissure = None
_prTouches = None
_autre = None
_lieuSeance = None
_jourSeance = None
_heureSeance = None
_categories = None
_reperageEffectif = None
"""Fouille"""
_idMaitreOuvrage = None
_idDirectionLocale = None
_idEntrepreneur = None
_idResponsableTravaux = None
_nomMaitreOuvrage = None
_rueMaitreOuvrage = None
_localiteMaitreOuvrage = None
_telephoneMaitreOuvrage = None
_faxMaitreOuvrage = None
_courrielMaitreOuvrage = None
_nomDirectionLocale = None
_prenomDirectionLocale = None
_mobileDirectionLocale = None
_telephoneDirectionLocale = None
_faxDirectionLocale = None
_courrielDirectionLocale = None
_nomEntrepreneur = None
_rueEntrepreneur = None
_localiteEntrepreneur = None
_telephoneEntrepreneur = None
_faxEntrepreneur = None
_courrielEntrepreneur = None
_nomResponsableTravaux = None
_prenomResponsableTravaux = None
_mobileResponsableTravaux = None
_telephoneResponsableTravaux = None
_faxResponsableTravaux = None
_courrielResponsableTravaux = None
_facturation = None
'''
_coordonnesX = None
_coordonnesY = None
_commune = None
_cadastre = None
_bienFonds = None
_autreCadastre = None
_autreBienFonds = None
_lieuDit = None
'''
_prTouches = None
_longueurEtape = None
_epaisseurCaisson = None
_qualiteCaisson = None
_epaisseurSupport = None
_qualiteSupport = None
_epaisseurRevetement = None
_qualiteRevetement = None
_qualiteEncollage = None
_dateDebutValide = None
_dateFinValide = None
_dateMajValide = None
_numeroFacture = None
_dateFacture = None
_reserveEventuelle = None
_planTypes = None
_reperageEffectif = None
"""Manifestation"""
_parcours = None
# Read params evenement
# Read perturbation params
if 'idEvenement' in request.params:
idEvenement = request.params['idEvenement']
if not idEvenement:
raise Exception('Id evenement is null')
# Check if the user has permission to update evenement
user_can_update_evenement = Utils.user_can_update_evenement(request, current_user_id, idEvenement)
if not user_can_update_evenement:
raise HTTPForbidden()
if 'idEntite' in request.params:
idEntite = request.params['idEntite']
if 'idResponsable' in request.params:
idResponsable = request.params['idResponsable']
if 'idRequerant' in request.params:
idRequerant = request.params['idRequerant']
if 'type' in request.params:
type = request.params['type']
if 'numeroDossier' in request.params:
numeroDossier = request.params['numeroDossier']
if 'division' in request.params:
division = request.params['division']
if 'libelle' in request.params:
libelle = request.params['libelle']
if 'description' in request.params:
description = request.params['description']
if 'prevision' in request.params:
prevision = request.params['prevision']
if prevision == 'true':
prevision = True
elif prevision == 'false':
prevision = False
else:
prevision = None
"""
if 'urgence' in request.params:
urgence = request.params['urgence']
if urgence == 'true':
urgence = True
elif urgence == 'false':
urgence = False
else:
urgence = None
"""
if 'dateDebut' in request.params:
dateDebut = request.params['dateDebut']
if 'heureDebut' in request.params:
heureDebut = request.params['heureDebut']
if 'dateFin' in request.params:
dateFin = request.params['dateFin']
if 'heureFin' in request.params:
heureFin = request.params['heureFin']
if 'localisation' in request.params:
localisation = request.params['localisation']
'''
if 'localite' in request.params:
localite = request.params['localite']
if 'lieuDit' in request.params:
lieuDit = request.params['lieuDit']
if 'reperageEffectif' in request.params:
reperageEffectif = request.params['reperageEffectif']
if reperageEffectif == 'true':
reperageEffectif = True
elif reperageEffectif == 'false':
reperageEffectif = False
else:
reperageEffectif = None
'''
if 'nomRequerant' in request.params:
nomRequerant = request.params['nomRequerant']
if 'rueRequerant' in request.params:
rueRequerant = request.params['rueRequerant']
if 'localiteRequerant' in request.params:
localiteRequerant = request.params['localiteRequerant']
if 'telephoneRequerant' in request.params:
telephoneRequerant = request.params['telephoneRequerant']
if 'faxRequerant' in request.params:
faxRequerant = request.params['faxRequerant']
if 'courrielRequerant' in request.params:
courrielRequerant = request.params['courrielRequerant']
if 'nomContact' in request.params:
nomContact = request.params['nomContact']
if 'prenomContact' in request.params:
prenomContact = request.params['prenomContact']
if 'mobileContact' in request.params:
mobileContact = request.params['mobileContact']
if 'telephoneContact' in request.params:
telephoneContact = request.params['telephoneContact']
if 'faxContact' in request.params:
faxContact = request.params['faxContact']
if 'courrielContact' in request.params:
courrielContact = request.params['courrielContact']
if 'remarque' in request.params:
remarque = request.params['remarque']
if 'dateDemande' in request.params:
dateDemande = request.params['dateDemande']
if 'dateOctroi' in request.params:
dateOctroi = request.params['dateOctroi']
"""
if 'ajoutePar' in request.params:
ajoutePar = request.params['ajoutePar']
if 'dateAjout' in request.params:
dateAjout = request.params['dateAjout']
"""
if 'modifiePar' in request.params:
modifiePar = request.params['modifiePar']
"""
if 'dateModification' in request.params:
dateModification = request.params['dateModification']
if 'dateSuppression' in request.params:
dateSuppression = request.params['dateSuppression']
"""
# Check date_debut, if less than 24h, urgence=true
""""
if not urgence and dateDebut != None and heureDebut != None:
date_time_str = str(dateDebut) + ' ' + str(heureDebut)
date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
now = datetime.datetime.now()
if date_time_obj >= now and date_time_obj <= now + timedelta(days=1):
urgence = True
"""
if 'geometries_reperages' in request.params:
geometries_reperages = request.params['geometries_reperages']
# Read params autre evenement
if '_idMaitreOuvrage' in request.params:
_idMaitreOuvrage = request.params['_idMaitreOuvrage']
if '_idDirectionLocale' in request.params:
_idDirectionLocale = request.params['_idDirectionLocale']
if '_idEntrepreneur' in request.params:
_idEntrepreneur = request.params['_idEntrepreneur']
if '_idResponsableTravaux' in request.params:
_idResponsableTravaux = request.params['_idResponsableTravaux']
if '_cause' in request.params:
_cause = request.params['_cause']
if '_nomMaitreOuvrage' in request.params:
_nomMaitreOuvrage = request.params['_nomMaitreOuvrage']
if '_rueMaitreOuvrage' in request.params:
_rueMaitreOuvrage = request.params['_rueMaitreOuvrage']
if '_localiteMaitreOuvrage' in request.params:
_localiteMaitreOuvrage = request.params['_localiteMaitreOuvrage']
if '_telephoneMaitreOuvrage' in request.params:
_telephoneMaitreOuvrage = request.params['_telephoneMaitreOuvrage']
if '_faxMaitreOuvrage' in request.params:
_faxMaitreOuvrage = request.params['_faxMaitreOuvrage']
if '_courrielMaitreOuvrage' in request.params:
_courrielMaitreOuvrage = request.params['_courrielMaitreOuvrage']
if '_nomDirectionLocale' in request.params:
_nomDirectionLocale = request.params['_nomDirectionLocale']
if '_prenomDirectionLocale' in request.params:
_prenomDirectionLocale = request.params['_prenomDirectionLocale']
if '_mobileDirectionLocale' in request.params:
_mobileDirectionLocale = request.params['_mobileDirectionLocale']
if '_telephoneDirectionLocale' in request.params:
_telephoneDirectionLocale = request.params['_telephoneDirectionLocale']
if '_faxDirectionLocale' in request.params:
_faxDirectionLocale = request.params['_faxDirectionLocale']
if '_courrielDirectionLocale' in request.params:
_courrielDirectionLocale = request.params['_courrielDirectionLocale']
if '_nomEntrepreneur' in request.params:
_nomEntrepreneur = request.params['_nomEntrepreneur']
if '_rueEntrepreneur' in request.params:
_rueEntrepreneur = request.params['_rueEntrepreneur']
if '_localiteEntrepreneur' in request.params:
_localiteEntrepreneur = request.params['_localiteEntrepreneur']
if '_telephoneEntrepreneur' in request.params:
_telephoneEntrepreneur = request.params['_telephoneEntrepreneur']
if '_faxEntrepreneur' in request.params:
_faxEntrepreneur = request.params['_faxEntrepreneur']
if '_courrielEntrepreneur' in request.params:
_courrielEntrepreneur = request.params['_courrielEntrepreneur']
if '_nomResponsableTravaux' in request.params:
_nomResponsableTravaux = request.params['_nomResponsableTravaux']
if '_prenomResponsableTravaux' in request.params:
_prenomResponsableTravaux = request.params['_prenomResponsableTravaux']
if '_mobileResponsableTravaux' in request.params:
_mobileResponsableTravaux = request.params['_mobileResponsableTravaux']
if '_telephoneResponsableTravaux' in request.params:
_telephoneResponsableTravaux = request.params['_telephoneResponsableTravaux']
if '_faxResponsableTravaux' in request.params:
_faxResponsableTravaux = request.params['_faxResponsableTravaux']
if '_courrielResponsableTravaux' in request.params:
_courrielResponsableTravaux = request.params['_courrielResponsableTravaux']
if '_facturation' in request.params:
_facturation = request.params['_facturation']
'''
if '_coordonnesX' in request.params:
_coordonnesX = request.params['_coordonnesX']
if '_coordonnesY' in request.params:
_coordonnesY = request.params['_coordonnesY']
if '_commune' in request.params:
_commune = request.params['_commune']
if '_cadastre' in request.params:
_cadastre = request.params['_cadastre']
if '_bienFonds' in request.params:
_bienFonds = request.params['_bienFonds']
if '_autre_cadastre' in request.params:
_autre_cadastre = request.params['_autre_cadastre']
if '_autre_bienFonds' in request.params:
_autre_bienFonds = request.params['_autre_bienFonds']
if '_lieuDit' in request.params:
_lieuDit = request.params['_lieuDit']
'''
if '_dateDebutValide' in request.params:
_dateDebutValide = request.params['_dateDebutValide']
if '_dateFinValide' in request.params:
_dateFinValide = request.params['_dateFinValide']
if '_dateMajValide' in request.params:
_dateMajValide = request.params['_dateMajValide']
if '_numeroFacture' in request.params:
_numeroFacture = request.params['_numeroFacture']
if '_dateFacture' in request.params:
_dateFacture = request.params['_dateFacture']
if '_reserveEventuelle' in request.params:
_reserveEventuelle = request.params['_reserveEventuelle']
# Read params chantier
if '_idMaitreOuvrage' in request.params:
_idMaitreOuvrage = request.params['_idMaitreOuvrage']
if '_idDirectionLocale' in request.params:
_idDirectionLocale = request.params['_idDirectionLocale']
if '_idEntrepreneur' in request.params:
_idEntrepreneur = request.params['_idEntrepreneur']
if '_idResponsableTravaux' in request.params:
_idResponsableTravaux = request.params['_idResponsableTravaux']
if '_projet' in request.params:
_projet = request.params['_projet']
if '_longueurEtape' in request.params:
_longueurEtape = request.params['_longueurEtape']
if '_surface' in request.params:
_surface = request.params['_surface']
if '_idCentraleEnrobage' in request.params:
_idCentraleEnrobage = request.params['_idCentraleEnrobage']
if '_epaisseurCaisson' in request.params:
_epaisseurCaisson = request.params['_epaisseurCaisson']
if '_qualiteCaisson' in request.params:
_qualiteCaisson = request.params['_qualiteCaisson']
if '_epaisseurSupport' in request.params:
_epaisseurSupport = request.params['_epaisseurSupport']
if '_qualiteSupport' in request.params:
_qualiteSupport = request.params['_qualiteSupport']
if '_epaisseurRevetement' in request.params:
_epaisseurRevetement = request.params['_epaisseurRevetement']
if '_qualiteRevetement' in request.params:
_qualiteRevetement = request.params['_qualiteRevetement']
if '_qualiteEncollage' in request.params:
_qualiteEncollage = request.params['_qualiteEncollage']
if '_boucleInduction' in request.params:
_boucleInduction = request.params['_boucleInduction']
if _boucleInduction == 'true':
_boucleInduction = True
elif _boucleInduction == 'false':
_boucleInduction = False
else:
_boucleInduction = None
if '_faucherAccotement' in request.params:
_faucherAccotement = request.params['_faucherAccotement']
if _faucherAccotement == 'true':
_faucherAccotement = True
elif _faucherAccotement == 'false':
_faucherAccotement = False
else:
_faucherAccotement = None
if '_curerDepotoirs' in request.params:
_curerDepotoirs = request.params['_curerDepotoirs']
if _curerDepotoirs == 'true':
_curerDepotoirs = True
elif _curerDepotoirs == 'false':
_curerDepotoirs = False
else:
_curerDepotoirs = None
if '_nettoyer_bords' in request.params:
_nettoyer_bords = request.params['_nettoyer_bords']
if _nettoyer_bords == 'true':
_nettoyer_bords = True
elif _nettoyer_bords == 'false':
_nettoyer_bords = False
else:
_nettoyer_bords = None
if '_colmater_fissure' in request.params:
_colmater_fissure = request.params['_colmater_fissure']
if _colmater_fissure == 'true':
_colmater_fissure = True
elif _colmater_fissure == 'false':
_colmater_fissure = False
else:
_colmater_fissure = None
if '_prTouches' in request.params:
_prTouches = request.params['_prTouches']
if _prTouches == 'true':
_prTouches = True
elif _prTouches == 'false':
_prTouches = False
else:
_prTouches = None
if '_autre' in request.params:
_autre = request.params['_autre']
if '_lieuSeance' in request.params:
_lieuSeance = request.params['_lieuSeance']
if '_jourSeance' in request.params:
_jourSeance = request.params['_jourSeance']
if '_heureSeance' in request.params:
_heureSeance = request.params['_heureSeance']
if '_reperageEffectif' in request.params:
_reperageEffectif = request.params['_reperageEffectif']
if _reperageEffectif == 'true':
_reperageEffectif = True
elif _reperageEffectif == 'false':
_reperageEffectif = False
else:
_reperageEffectif = None
if '_categories' in request.params:
_categories = request.params['_categories']
if _categories:
categories_array = json.loads(_categories)
# Read params fouille
if '_idMaitreOuvrage' in request.params:
_idMaitreOuvrage = request.params['_idMaitreOuvrage']
if '_idDirectionLocale' in request.params:
_idDirectionLocale = request.params['_idDirectionLocale']
if '_idEntrepreneur' in request.params:
_idEntrepreneur = request.params['_idEntrepreneur']
if '_idResponsableTravaux' in request.params:
_idResponsableTravaux = request.params['_idResponsableTravaux']
if '_nomMaitreOuvrage' in request.params:
_nomMaitreOuvrage = request.params['_nomMaitreOuvrage']
if '_rueMaitreOuvrage' in request.params:
_rueMaitreOuvrage = request.params['_rueMaitreOuvrage']
if '_localiteMaitreOuvrage' in request.params:
_localiteMaitreOuvrage = request.params['_localiteMaitreOuvrage']
if '_telephoneMaitreOuvrage' in request.params:
_telephoneMaitreOuvrage = request.params['_telephoneMaitreOuvrage']
if '_faxMaitreOuvrage' in request.params:
_faxMaitreOuvrage = request.params['_faxMaitreOuvrage']
if '_courrielMaitreOuvrage' in request.params:
_courrielMaitreOuvrage = request.params['_courrielMaitreOuvrage']
if '_nomDirectionLocale' in request.params:
_nomDirectionLocale = request.params['_nomDirectionLocale']
if '_prenomDirectionLocale' in request.params:
_prenomDirectionLocale = request.params['_prenomDirectionLocale']
if '_mobileDirectionLocale' in request.params:
_mobileDirectionLocale = request.params['_mobileDirectionLocale']
if '_telephoneDirectionLocale' in request.params:
_telephoneDirectionLocale = request.params['_telephoneDirectionLocale']
if '_faxDirectionLocale' in request.params:
_faxDirectionLocale = request.params['_faxDirectionLocale']
if '_courrielDirectionLocale' in request.params:
_courrielDirectionLocale = request.params['_courrielDirectionLocale']
if '_nomEntrepreneur' in request.params:
_nomEntrepreneur = request.params['_nomEntrepreneur']
if '_rueEntrepreneur' in request.params:
_rueEntrepreneur = request.params['_rueEntrepreneur']
if '_localiteEntrepreneur' in request.params:
_localiteEntrepreneur = request.params['_localiteEntrepreneur']
if '_telephoneEntrepreneur' in request.params:
_telephoneEntrepreneur = request.params['_telephoneEntrepreneur']
if '_faxEntrepreneur' in request.params:
_faxEntrepreneur = request.params['_faxEntrepreneur']
if '_courrielEntrepreneur' in request.params:
_courrielEntrepreneur = request.params['_courrielEntrepreneur']
if '_nomResponsableTravaux' in request.params:
_nomResponsableTravaux = request.params['_nomResponsableTravaux']
if '_prenomResponsableTravaux' in request.params:
_prenomResponsableTravaux = request.params['_prenomResponsableTravaux']
if '_mobileResponsableTravaux' in request.params:
_mobileResponsableTravaux = request.params['_mobileResponsableTravaux']
if '_telephoneResponsableTravaux' in request.params:
_telephoneResponsableTravaux = request.params['_telephoneResponsableTravaux']
if '_faxResponsableTravaux' in request.params:
_faxResponsableTravaux = request.params['_faxResponsableTravaux']
if '_courrielResponsableTravaux' in request.params:
_courrielResponsableTravaux = request.params['_courrielResponsableTravaux']
if '_facturation' in request.params:
_facturation = request.params['_facturation']
'''
if '_coordonnesX' in request.params:
_coordonnesX = request.params['_coordonnesX']
if '_coordonnesY' in request.params:
_coordonnesY = request.params['_coordonnesY']
if '_commune' in request.params:
_commune = request.params['_commune']
if '_cadastre' in request.params:
_cadastre = request.params['_cadastre']
if '_bienFonds' in request.params:
_bienFonds = request.params['_bienFonds']
if '_autreCadastre' in request.params:
_autreCadastre = request.params['_autreCadastre']
if '_autreBienFonds' in request.params:
_autreBienFonds = request.params['_autreBienFonds']
if '_lieuDit' in request.params:
_lieuDit = request.params['_lieuDit']
'''
if '_prTouches' in request.params:
_prTouches = request.params['_prTouches']
if _prTouches == 'true':
_prTouches = True
elif _prTouches == 'false':
_prTouches = False
else:
_prTouches = None
if '_longueurEtape' in request.params:
_longueurEtape = request.params['_longueurEtape']
if '_epaisseurCaisson' in request.params:
_epaisseurCaisson = request.params['_epaisseurCaisson']
if '_qualiteCaisson' in request.params:
_qualiteCaisson = request.params['_qualiteCaisson']
if '_epaisseurSupport' in request.params:
_epaisseurSupport = request.params['_epaisseurSupport']
if '_qualiteSupport' in request.params:
_qualiteSupport = request.params['_qualiteSupport']
if '_epaisseurRevetement' in request.params:
_epaisseurRevetement = request.params['_epaisseurRevetement']
if '_qualiteRevetement' in request.params:
_qualiteRevetement = request.params['_qualiteRevetement']
if '_qualiteEncollage' in request.params:
_qualiteEncollage = request.params['_qualiteEncollage']
if '_dateDebutValide' in request.params:
_dateDebutValide = request.params['_dateDebutValide']
if '_dateFinValide' in request.params:
_dateFinValide = request.params['_dateFinValide']
if '_dateMajValide' in request.params:
_dateMajValide = request.params['_dateMajValide']
if '_numeroFacture' in request.params:
_numeroFacture = request.params['_numeroFacture']
if '_dateFacture' in request.params:
_dateFacture = request.params['_dateFacture']
if '_reserveEventuelle' in request.params:
_reserveEventuelle = request.params['_reserveEventuelle']
if '_reperageEffectif' in request.params:
_reperageEffectif = request.params['_reperageEffectif']
if _reperageEffectif == 'true':
_reperageEffectif = True
elif _reperageEffectif == 'false':
_reperageEffectif = False
else:
_reperageEffectif = None
if '_planTypes' in request.params:
_planTypes = request.params['_planTypes']
if _planTypes:
plan_types_array = json.loads(_planTypes)
# Read params manifestation
if '_parcours' in request.params:
_parcours = request.params['_parcours']
# Read params Reperage
if '_idReperage' in request.params:
_idReperage = request.params['_idReperage']
if '_idDeviation' in request.params and request.params['_idDeviation'] != '':
_idDeviation = request.params['_idDeviation']
if '_proprietaire' in request.params:
_proprietaire = request.params['_proprietaire']
if '_axe' in request.params:
_axe = request.params['_axe']
if '_sens' in request.params:
_sens = request.params['_sens']
if '_prDebut' in request.params:
_prDebut = request.params['_prDebut']
if '_prDebutDistance' in request.params:
_prDebutDistance = request.params['_prDebutDistance']
if '_prFin' in request.params:
_prFin = request.params['_prFin']
if '_prFinDistance' in request.params:
_prFinDistance = request.params['_prFinDistance']
if '_ecartd' in request.params:
_ecartd = request.params['_ecartd']
if '_ecartf' in request.params:
_ecartf = request.params['_ecartf']
if '_usageNeg' in request.params:
_usageNeg = request.params['_usageNeg']
if _usageNeg == 'true':
_usageNeg = True
elif _usageNeg == 'false':
_usageNeg = False
else:
_usageNeg = None
if '_fSurf' in request.params and request.params['_fSurf'] != '':
_fSurf = request.params['_fSurf']
if '_fLong' in request.params and request.params['_fLong'] != '':
_fLong = request.params['_fLong']
with transaction.manager:
evenement_query = request.dbsession.query(models.Evenement).filter(
models.Evenement.id == idEvenement)
if evenement_query.count() == 0:
raise CustomError('{} with id {} not found'.format(models.Evenement.__tablename__, idEvenement))
evenement_record = evenement_query.first()
evenement_record.id_responsable = idResponsable
evenement_record.id_requerant = idRequerant
evenement_record.type = type
evenement_record.numero_dossier = numeroDossier
evenement_record.division = division
evenement_record.libelle = libelle
evenement_record.description = description
evenement_record.prevision = prevision
#evenement_record.urgence = urgence
evenement_record.date_debut = dateDebut
evenement_record.heure_debut = heureDebut
evenement_record.date_fin = dateFin
evenement_record.heure_fin = heureFin
evenement_record.localisation = localisation
# evenement_record.localite = localite
# evenement_record.lieu_dit = lieuDit
# evenement_record.reperage_effectif = reperageEffectif
evenement_record.nom_requerant = nomRequerant
evenement_record.rue_requerant = rueRequerant
evenement_record.localite_requerant = localiteRequerant
evenement_record.telephone_requerant = telephoneRequerant
evenement_record.fax_requerant = faxRequerant
evenement_record.courriel_requerant = courrielRequerant
evenement_record.nom_contact = nomContact
evenement_record.prenom_contact = prenomContact
evenement_record.mobile_contact = mobileContact
evenement_record.telephone_contact = telephoneContact
evenement_record.fax_contact = faxContact
evenement_record.courriel_contact = courrielContact
evenement_record.remarque = remarque
evenement_record.date_demande = dateDemande
evenement_record.date_octroi = dateOctroi
# evenement_record.id_utilisateur_ajout = ajoutePar
# evenement_record.date_ajout = dateAjout
evenement_record.id_utilisateur_modification = current_user_id,
# evenement_record.date_modification = dateModification
# evenement_record.date_suppression = dateSuppression
# Type evenement : autre
if int(type) == int(settings['autre_evenement_id']):
autre_evenement_query = request.dbsession.query(models.AutreEvenement).filter(
models.AutreEvenement.id_evenement == idEvenement)
if autre_evenement_query.count() == 0:
raise CustomError(
'{} with id_evenement {} not found'.format(models.AutreEvenement.__tablename__, idEvenement))
autre_evenement_record = autre_evenement_query.first()
autre_evenement_record.id_maitre_ouvrage = _idMaitreOuvrage
autre_evenement_record.id_direction_locale = _idDirectionLocale
autre_evenement_record.id_entrepreneur = _idEntrepreneur
autre_evenement_record.id_responsable_travaux = _idResponsableTravaux
autre_evenement_record.cause = _cause
autre_evenement_record.nom_maitre_ouvrage = _nomMaitreOuvrage
autre_evenement_record.rue_maitre_ouvrage = _rueMaitreOuvrage
autre_evenement_record.localite_maitre_ouvrage = _localiteMaitreOuvrage
autre_evenement_record.telephone_maitre_ouvrage = _telephoneMaitreOuvrage
autre_evenement_record.fax_maitre_ouvrage = _faxMaitreOuvrage
autre_evenement_record.courriel_maitre_ouvrage = _courrielMaitreOuvrage
autre_evenement_record.nom_direction_locale = _nomDirectionLocale
autre_evenement_record.prenom_direction_locale = _prenomDirectionLocale
autre_evenement_record.mobile_direction_locale = _mobileDirectionLocale
autre_evenement_record.telephone_direction_locale = _telephoneDirectionLocale
autre_evenement_record.fax_direction_locale = _faxDirectionLocale
autre_evenement_record.courriel_direction_locale = _courrielDirectionLocale
autre_evenement_record.nom_entrepreneur = _nomEntrepreneur
autre_evenement_record.rue_entrepreneur = _rueEntrepreneur
autre_evenement_record.localite_entrepreneur = _localiteEntrepreneur
autre_evenement_record.telephone_entrepreneur = _telephoneEntrepreneur
autre_evenement_record.fax_entrepreneur = _faxEntrepreneur
autre_evenement_record.courriel_entrepreneur = _courrielEntrepreneur
autre_evenement_record.nom_responsable_travaux = _nomResponsableTravaux
autre_evenement_record.prenom_responsable_travaux = _prenomResponsableTravaux
autre_evenement_record.mobile_responsable_travaux = _mobileResponsableTravaux
autre_evenement_record.telephone_responsable_travaux = _telephoneResponsableTravaux
autre_evenement_record.fax_responsable_travaux = _faxResponsableTravaux
autre_evenement_record.courriel_responsable_travaux = _courrielResponsableTravaux
autre_evenement_record.facturation = _facturation
'''
autre_evenement_record.coordonnes_x = _coordonnesX
autre_evenement_record.coordonnes_y = _coordonnesY
autre_evenement_record.commune = _commune
autre_evenement_record.cadastre = _cadastre
autre_evenement_record.bien_fonds = _bienFonds
autre_evenement_record.autre_cadastre = _autre_cadastre
autre_evenement_record.autre_bien_fonds = _autre_bienFonds
autre_evenement_record.lieu_dit = _lieuDit
'''
autre_evenement_record.date_debut_valide = _dateDebutValide
autre_evenement_record.date_fin_valide = _dateFinValide
autre_evenement_record.date_maj_valide = _dateMajValide
autre_evenement_record.numero_facture = _numeroFacture
autre_evenement_record.date_facture = _dateFacture
autre_evenement_record.reserve_eventuelle = _reserveEventuelle
# Type evenement : Chantier
elif int(type) == int(settings['chantier_evenement_id']):
chantier_query = request.dbsession.query(models.Chantier).filter(
models.Chantier.id_evenement == idEvenement)
if chantier_query.count() == 0:
raise CustomError(
'{} with id_evenement {} not found'.format(models.Chantier.__tablename__, idEvenement))
chantier_record = chantier_query.first()
chantier_record.id_maitre_ouvrage = _idMaitreOuvrage
chantier_record.id_direction_locale = _idDirectionLocale
chantier_record.id_entrepreneur = _idEntrepreneur
chantier_record.id_responsable_travaux = _idResponsableTravaux
chantier_record.projet = _projet
chantier_record.longueur_etape = _longueurEtape
chantier_record.surface = _surface
chantier_record.id_centrale_enrobage = _idCentraleEnrobage
chantier_record.epaisseur_caisson = _epaisseurCaisson
chantier_record.qualite_caisson = _qualiteCaisson
chantier_record.epaisseur_support = _epaisseurSupport
chantier_record.qualite_support = _qualiteSupport
chantier_record.epaisseur_revetement = _epaisseurRevetement
chantier_record.qualite_revetement = _qualiteRevetement
chantier_record.qualite_encollage = _qualiteEncollage
chantier_record.boucle_induction = _boucleInduction
chantier_record.faucher_accotement = _faucherAccotement
chantier_record.curer_depotoirs = _curerDepotoirs
chantier_record.nettoyer_bords = _nettoyer_bords
chantier_record.colmater_fissure = _colmater_fissure
chantier_record.pr_touches = _prTouches
chantier_record.autre = _autre
chantier_record.lieu_seance = _lieuSeance
chantier_record.jour_seance = _jourSeance
chantier_record.heure_seance = _heureSeance
# Delete old categories
request.dbsession.query(models.LienChantierCategorieChantier).filter(
models.LienChantierCategorieChantier.id_chantier == chantier_record.id).delete(synchronize_session=False)
if categories_array and len(categories_array) > 0:
for category_id in categories_array:
lien_categ_chant = models.LienChantierCategorieChantier(
id_chantier=chantier_record.id,
categorie=category_id
)
request.dbsession.add(lien_categ_chant)
# Type evenement : Fouille
elif int(type) == int(settings['fouille_evenement_id']):
fouille_query = request.dbsession.query(models.Fouille).filter(
models.Fouille.id_evenement == idEvenement)
if fouille_query.count() == 0:
raise CustomError(
'{} with id_evenement {} not found'.format(models.Fouille.__tablename__, idEvenement))
fouille_record = fouille_query.first()
fouille_record.id_maitre_ouvrage = _idMaitreOuvrage
fouille_record.id_direction_locale = _idDirectionLocale
fouille_record.id_entrepreneur = _idEntrepreneur
fouille_record.id_responsable_travaux = _idResponsableTravaux
fouille_record.nom_maitre_ouvrage = _nomMaitreOuvrage
fouille_record.rue_maitre_ouvrage = _rueMaitreOuvrage
fouille_record.localite_maitre_ouvrage = _localiteMaitreOuvrage
fouille_record.telephone_maitre_ouvrage = _telephoneMaitreOuvrage
fouille_record.fax_maitre_ouvrage = _faxMaitreOuvrage
fouille_record.courriel_maitre_ouvrage = _courrielMaitreOuvrage
fouille_record.nom_direction_locale = _nomDirectionLocale
fouille_record.prenom_direction_locale = _prenomDirectionLocale
fouille_record.mobile_direction_locale = _mobileDirectionLocale
fouille_record.telephone_direction_locale = _telephoneDirectionLocale
fouille_record.fax_direction_locale = _faxDirectionLocale
fouille_record.courriel_direction_locale = _courrielDirectionLocale
fouille_record.nom_entrepreneur = _nomEntrepreneur
fouille_record.rue_entrepreneur = _rueEntrepreneur
fouille_record.localite_entrepreneur = _localiteEntrepreneur
fouille_record.telephone_entrepreneur = _telephoneEntrepreneur
fouille_record.fax_entrepreneur = _faxEntrepreneur
fouille_record.courriel_entrepreneur = _courrielEntrepreneur
fouille_record.nom_responsable_travaux = _nomResponsableTravaux
fouille_record.prenom_responsable_travaux = _prenomResponsableTravaux
fouille_record.mobile_responsable_travaux = _mobileResponsableTravaux
fouille_record.telephone_responsable_travaux = _telephoneResponsableTravaux
fouille_record.fax_responsable_travaux = _faxResponsableTravaux
fouille_record.courriel_responsable_travaux = _courrielResponsableTravaux
fouille_record.facturation = _facturation
# fouille_record.coordonnes_x = _coordonnesX
# fouille_record.coordonnes_y = _coordonnesY
# fouille_record.commune = _commune
# fouille_record.cadastre = _cadastre
# fouille_record.bien_fonds = _bienFonds
# fouille_record.autre_cadastre = _autreCadastre
# fouille_record.autre_bien_fonds = _autreBienFonds
# fouille_record.lieu_dit = _lieuDit
fouille_record.pr_touches = _prTouches
fouille_record.longueur_etape = _longueurEtape
fouille_record.epaisseur_caisson = _epaisseurCaisson
fouille_record.qualite_caisson = _qualiteCaisson
fouille_record.epaisseur_support = _epaisseurSupport
fouille_record.qualite_support = _qualiteSupport
fouille_record.epaisseur_revetement = _epaisseurRevetement
fouille_record.qualite_revetement = _qualiteRevetement
fouille_record.qualite_encollage = _qualiteEncollage
fouille_record.date_debut_valide = _dateDebutValide
fouille_record.date_fin_valide = _dateFinValide
fouille_record.date_maj_valide = _dateMajValide
fouille_record.numero_facture = _numeroFacture
fouille_record.date_facture = _dateFacture
fouille_record.reserve_eventuelle = _reserveEventuelle
# Plan types
# Delete old plan types
request.dbsession.query(models.LienFouillePlanType).filter(
models.LienFouillePlanType.id_evenement == idEvenement).delete(synchronize_session=False)
if plan_types_array and len(plan_types_array) > 0:
for plan_type_id in plan_types_array:
lien_fouille_plan = models.LienFouillePlanType(
id_evenement=idEvenement,
id_plan_type=plan_type_id
)
request.dbsession.add(lien_fouille_plan)
# Type evenement : Manifestation
elif int(type) == int(settings['manifestation_evenement_id']):
manifestation_query = request.dbsession.query(models.Manifestation).filter(
models.Manifestation.id_evenement == idEvenement)
if manifestation_query.count() == 0:
raise CustomError(
'{} with id_evenement {} not found'.format(models.Manifestation.__tablename__, idEvenement))
manifestation_record = manifestation_query.first()
manifestation_record.parcours = _parcours
# Geometries_reperages
# Delete old geometries
ev_ligne_ids = []
for item in request.dbsession.query(models.EvenementLigne.id).filter(
models.EvenementLigne.id_evenement == idEvenement).all():
ev_ligne_ids.append(item.id)
request.dbsession.query(models.Reperage).filter(
models.Reperage.id_evenement_ligne.in_(ev_ligne_ids)).delete(synchronize_session=False)
request.dbsession.query(models.EvenementPoint).filter(
models.EvenementPoint.id_evenement == idEvenement).delete()
request.dbsession.query(models.EvenementLigne).filter(
models.EvenementLigne.id_evenement == idEvenement).delete()
request.dbsession.query(models.EvenementPolygone).filter(
models.EvenementPolygone.id_evenement == idEvenement).delete()
# Add new geometries
if geometries_reperages != None:
json_geometries_reperages = json.loads(geometries_reperages)
for onegeojson in json_geometries_reperages:
# Geometry
if 'geometry' in onegeojson:
geometry = onegeojson['geometry']
if 'type' in geometry:
type_geom = geometry['type']
# Point
if type_geom == 'Point':
evenement_point_model = models.EvenementPoint(id_evenement=idEvenement)
evenement_point_model.set_json_geometry(str(geometry), settings['srid'])
request.dbsession.add(evenement_point_model)
# Line
elif type_geom == 'LineString' or type_geom == 'MultiLineString' or type_geom == 'GeometryCollection':
evenement_ligne_model = models.EvenementLigne(id_evenement=idEvenement)
evenement_ligne_model.set_json_geometry(str(geometry), settings['srid'])
request.dbsession.add(evenement_ligne_model)
if 'reperage' in onegeojson:
request.dbsession.flush()
reperage = onegeojson['reperage']
reperage_model = models.Reperage(
id_evenement_ligne=evenement_ligne_model.id,
id_deviation=reperage['idDeviation'],
proprietaire=reperage['proprietaire'],
axe=reperage['axe'],
sens=reperage['sens'],
pr_debut=reperage['prDebut'],
pr_debut_distance=reperage['prDebutDistance'],
pr_fin=reperage['prFin'],
pr_fin_distance=reperage['prFinDistance'],
ecartd=reperage['ecartd'],
ecartf=reperage['ecartf'],
usage_neg=reperage['usageNeg'],
f_surf=reperage['fSurf'],
f_long=reperage['fLong']
)
request.dbsession.add(reperage_model)
# Polygon
elif type_geom == 'Polygon':
evenement_polygon_model = models.EvenementPolygone(id_evenement=idEvenement)
evenement_polygon_model.set_json_geometry(str(geometry), settings['srid'])
request.dbsession.add(evenement_polygon_model)
# Commit transaction
transaction.commit()
request.dbsession.flush()
# Send mail to SRB touché
if _prTouches:
subject = 'subject'
body = 'body'
query = request.dbsession.query(models.ContactAvisPrTouche.id_contact).all()
conatct_ids = []
for c in query:
conatct_ids.append(c.id_contact)
contact_mails = Utils.get_contacts_mails_by_ids(request, conatct_ids)
PTMailer.send_mail(request, contact_mails, subject, body)
except HTTPForbidden as e:
raise HTTPForbidden()
except CustomError as e:
log.error(str(e))
return {'error': 'true', 'code': 500, 'message': general_exception}
except Exception as e:
transaction.abort()
request.dbsession.rollback()
log.error(str(e))
return {'error': 'true', 'code': 500, 'message': general_exception}
return {'message': 'Data successfully saved'}
########################################################
# Type perturbation by id view
########################################################
@view_config(route_name='types_perturbation_by_id', request_method='GET', renderer='json')
def type_perturbation_by_id_view(request):
    """Return the TypePerturbation record matching the ``id`` matchdict entry.

    On a missing record (or any database error) an error payload is returned
    instead of the record.
    """
    try:
        app_settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + app_settings['schema_name'])
        record = request.dbsession.query(models.TypePerturbation).filter(
            models.TypePerturbation.id == request.matchdict['id']).first()
        if not record:
            raise Exception(id_not_found_exception)
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return record
########################################################
# Delete perturbation by id view
########################################################
@view_config(route_name='perturbation_by_id', request_method='DELETE', renderer='json')
def delete_perturbation_by_id_view(request):
    """Soft-delete a perturbation by stamping its ``date_suppression``.

    Requires a valid ``auth_tkt`` cookie, a resolvable connected user id and
    the delete permission on the target perturbation; otherwise raises
    HTTPForbidden.  Returns a JSON success message, or an error payload when
    the record is missing or the operation fails.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        # Avoid shadowing the builtin `id`
        perturbation_id = request.matchdict['id']
        # Check authorization
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        current_user_id = int(current_user_id) if current_user_id else None
        if current_user_id is None:
            raise HTTPForbidden()
        # Check if the user has permission to delete the perturbation
        user_can_delete_perturbation = Utils.user_can_delete_perturbation(request, current_user_id, perturbation_id)
        if not user_can_delete_perturbation:
            raise HTTPForbidden()
        # NOTE(review): the original re-read settings and re-executed
        # `set search_path` here a second time; the duplicate was removed.
        perturbation = request.dbsession.query(models.Perturbation).filter(
            models.Perturbation.id == perturbation_id).first()
        if not perturbation:
            raise Exception(id_not_found_exception)
        with transaction.manager:
            # Soft delete: flag the row instead of removing it
            perturbation.date_suppression = func.now()
            # Commit transaction
            transaction.commit()
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Types perturbations view
########################################################
@view_config(route_name='types_perturbations', request_method='GET', renderer='json')
@view_config(route_name='types_perturbations_slash', request_method='GET', renderer='json')
def types_perturbations_view(request):
    """Return every TypePerturbation record as JSON, or an error payload."""
    try:
        app_settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + app_settings['schema_name'])
        records = request.dbsession.query(models.TypePerturbation).all()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return records
########################################################
# Perturbation by id view
########################################################
@view_config(route_name='perturbation_by_id', request_method='GET', renderer='json')
def perturbation_by_id_view(request):
    """Return one perturbation (matchdict ``id``) formatted with its evenement type.

    Returns an error payload with ``id_not_found_exception`` when the
    perturbation (or its parent evenement) does not exist.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        query_perturbation = request.dbsession.query(models.Perturbation)
        perturbation = query_perturbation.filter(models.Perturbation.id == request.matchdict['id']).first()
        # Check existence BEFORE dereferencing: the original read
        # perturbation.id_evenement first, so a missing record raised
        # AttributeError and was reported as a generic error.
        if not perturbation:
            raise Exception(id_not_found_exception)
        # Get type evenement
        query_evenement = request.dbsession.query(models.Evenement)
        evenement = query_evenement.filter(models.Evenement.id == perturbation.id_evenement).first()
        if not evenement:
            # Orphan perturbation: fail inside the try block instead of
            # raising an uncaught AttributeError on the return line below.
            raise Exception(id_not_found_exception)
    except (exc.SQLAlchemyError, exc.DBAPIError, Exception) as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return perturbation.format_with_type_evenement(evenement.type)
########################################################
# Perturbations view
########################################################
@view_config(route_name='perturbations', request_method='GET', renderer='json')
@view_config(route_name='perturbations_slash', request_method='GET', renderer='json')
def perturbations_view(request):
    """Return all perturbations, each serialized via its ``format()`` method."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        perturbations = request.dbsession.query(models.Perturbation).all()
        formattedResult = [one_perturbation.format() for one_perturbation in perturbations]
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return formattedResult
########################################################
# Get Perturbation edition by id view
########################################################
@view_config(route_name='perturbation_edition_by_id', request_method='GET', renderer='json')
def perturbation_edition_by_id_view(request):
    """Return the full editing payload for one perturbation.

    Aggregates, into a single JSON response: the perturbation itself, its
    type-specific record (Fermeture or Occupation), point/line geometries as
    GeoJSON, the contacts to notify, the reperages attached to its line
    geometries, and its deviations.  Requires a valid ``auth_tkt`` cookie and
    the read permission on the perturbation; raises HTTPForbidden otherwise.
    Returns an error payload on any other failure.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        # NOTE(review): `id` shadows the builtin; left as-is to keep the code unchanged.
        id = request.matchdict['id']
        # Check authorization
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        # Check if the user has permission to read perturbation
        user_can_read_perturbation = Utils.user_can_read_perturbation(request, current_user_id, id)
        if not user_can_read_perturbation:
            raise HTTPForbidden()
        relatedtype = None
        # Perturbation
        query_perturbation = request.dbsession.query(models.Perturbation)
        perturbation = query_perturbation.filter(models.Perturbation.id == id).first()
        if not perturbation:
            raise Exception(id_not_found_exception)
        # Contact of the user who created the record
        contact_utilisateur_ajout = request.dbsession.query(models.Contact).filter(
            models.Contact.id == perturbation.id_utilisateur_ajout).first()
        # Contact of the user who last modified the record
        contact_utilisateur_modification = request.dbsession.query(models.Contact).filter(
            models.Contact.id == perturbation.id_utilisateur_modification).first()
        # Contact of the user who validated the record
        contact_utilisateur_validation = request.dbsession.query(models.Contact).filter(
            models.Contact.id == perturbation.id_utilisateur_validation).first()
        # Type perturbation : Fermeture (road closure)
        if perturbation.type == int(settings['fermeture_perturbation_id']):
            query = request.dbsession.query(models.Fermeture)
            relatedtype = query.filter(models.Fermeture.id_perturbation == id).first()
        # Type perturbation : Occupation (road occupation)
        elif perturbation.type == int(settings['occupation_perturbation_id']):
            query = request.dbsession.query(models.Occupation)
            relatedtype = query.filter(models.Occupation.id_perturbation == id).first()
        # Geometries: point and line geometries serialized to GeoJSON in SQL
        geometries_array = []
        query_geom_point = request.dbsession.query(models.PerturbationPoint.id,
                                                   func.public.ST_AsGeoJSON(models.PerturbationPoint.geometry).label(
                                                       "geometry")).filter(
            models.PerturbationPoint.id_perturbation == id).all()
        query_geom_ligne = request.dbsession.query(models.PerturbationLigne.id,
                                                   func.public.ST_AsGeoJSON(models.PerturbationLigne.geometry).label(
                                                       "geometry")).filter(
            models.PerturbationLigne.id_perturbation == id).all()
        for item in query_geom_point + query_geom_ligne:
            geometries_array.append({'id': item.id, 'geometry': item.geometry})
        # Contacts to notify (avis) for this perturbation
        contacts_a_aviser = []
        for ap, c in request.dbsession.query(models.AvisPerturbation, models.Contact).filter(
                models.AvisPerturbation.id_perturbation == id).filter(models.AvisPerturbation.id_contact == models.Contact.id).all():
            contacts_a_aviser.append(c)
        # Reperage: linear referencing entries attached to the line geometries
        reperages = []
        # NOTE(review): despite the name, these are PerturbationLigne ids
        # (collected from query_geom_ligne above), matched against
        # Reperage.id_perturbation_ligne.
        evenement_lignes_ids = []
        for item in query_geom_ligne:
            evenement_lignes_ids.append(item.id)
        if len(evenement_lignes_ids) > 0:
            query_reperage = request.dbsession.query(models.Reperage).filter(models.Reperage.id_perturbation_ligne.in_(evenement_lignes_ids)).all()
            if query_reperage:
                for item in query_reperage:
                    reperages.append(item.format())
        # Deviations (detour routes) as GeoJSON
        deviations = []
        query_deviations = request.dbsession.query(models.Deviation.id,
                                                   func.public.ST_AsGeoJSON(models.Deviation.geometry).label(
                                                       "geometry")).filter(
            models.Deviation.id_perturbation == id).all()
        for item in query_deviations:
            deviations.append({'id': item.id, 'geometry': item.geometry})
        # Format perturbation and attach human-readable user names when available
        perturbation = perturbation.format()
        if contact_utilisateur_ajout:
            perturbation['nom_utilisateur_ajout'] = contact_utilisateur_ajout.prenom + ' ' + contact_utilisateur_ajout.nom
        if contact_utilisateur_modification:
            perturbation['nom_utilisateur_modification'] = contact_utilisateur_modification.prenom + ' ' + contact_utilisateur_modification.nom
        if contact_utilisateur_validation:
            perturbation['nom_utilisateur_validation'] = contact_utilisateur_validation.prenom + ' ' + contact_utilisateur_validation.nom
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'perturbation': perturbation, 'reperages': reperages,
            'infos': {} if not relatedtype else relatedtype, 'contacts_a_aviser': contacts_a_aviser, 'geometries': geometries_array, 'deviations': deviations}
########################################################
# Add perturbation edition
########################################################
@view_config(route_name='perturbation_edition', request_method='POST', renderer='json')
@view_config(route_name='perturbation_edition_slash', request_method='POST', renderer='json')
def add_perturbation_edition(request):
try:
settings = request.registry.settings
request.dbsession.execute('set search_path to ' + settings['schema_name'])
evenement_record = None
contacts_a_aviser_ids_array = []
# Check authorization
auth_tkt = request.cookies.get('auth_tkt', default=None)
if not auth_tkt:
raise HTTPForbidden()
current_user_id = Utils.get_connected_user_id(request)
current_user_id = int(current_user_id) if current_user_id else None
if current_user_id is None:
raise HTTPForbidden()
# Default params value
idEvenement = None
idEntite = None
idResponsableTrafic = None
type = None
trancheHoraire = None
description = None
dateDebut = None
heureDebut = None
dateFin = None
heureFin = None
localisation = None
nomResponsableTrafic = None
prenomResponsableTrafic = None
mobileResponsableTrafic = None
telephoneResponsableTrafic = None
faxResponsableTrafic = None
courrielResponsableTrafic = None
remarque = None
urgence = None
etat = int(settings['perturbation_etat_attente_code'])
dateValidation = None
utilisateurValidation = None
decision = None
dateDecision = None
#ajoutePar = None
# dateAjout = None
# modifiePar = None
# dateModification = None
# dateSuppression = None
geometries_reperages = None
"""Contacts à aviser"""
contacts_a_aviser = None
"""Fermeture"""
_deviation = None
_idResponsable = None
"""Occupation"""
_idResponsableRegulation = None
_typeOccupation = None
_typeRegulation = None
_voiesCondamnees = None
_largeurGabarit = None
_hauteurGabarit = None
_heurePointe = None
_weekEnd = None
"""Reperage"""
_idDeviation = None
_proprietaire = None
_axe = None
_sens = None
_prDebut = None
_prDebutDistance = None
_prFin = None
_prFinDistance = None
_ecartd = None
_ecartf = None
_usageNeg = None
_fSurf = None
_fLong = None
"""Deviations"""
geometries_deviations = None
# Read perturbation params
if 'idEvenement' in request.params:
idEvenement = request.params['idEvenement']
evenement_record = request.dbsession.query(models.Evenement).filter(models.Evenement.id == idEvenement).first()
if not evenement_record:
raise Exception('Evenement not found')
if 'idEntite' in request.params:
idEntite = request.params['idEntite']
# Check if the user has permission to add perturbation
user_can_add_perturbation = Utils.user_can_add_perturbation(request, current_user_id, idEvenement, idEntite)
if not user_can_add_perturbation:
raise HTTPForbidden()
if 'idResponsableTrafic' in request.params:
idResponsableTrafic = request.params['idResponsableTrafic']
if 'type' in request.params:
type = request.params['type']
if 'trancheHoraire' in request.params:
trancheHoraire = request.params['trancheHoraire']
if trancheHoraire == 'true':
trancheHoraire = True
elif trancheHoraire == 'false':
trancheHoraire = False
else:
trancheHoraire = None
if 'description' in request.params:
description = request.params['description']
if 'dateDebut' in request.params:
dateDebut = request.params['dateDebut']
if 'heureDebut' in request.params:
heureDebut = request.params['heureDebut']
if 'dateFin' in request.params:
dateFin = request.params['dateFin']
if 'heureFin' in request.params:
heureFin = request.params['heureFin']
if 'localisation' in request.params:
localisation = request.params['localisation']
if 'nomResponsableTrafic' in request.params:
nomResponsableTrafic = request.params['nomResponsableTrafic']
if 'prenomResponsableTrafic' in request.params:
prenomResponsableTrafic = request.params['prenomResponsableTrafic']
if 'mobileResponsableTrafic' in request.params:
mobileResponsableTrafic = request.params['mobileResponsableTrafic']
if 'telephoneResponsableTrafic' in request.params:
telephoneResponsableTrafic = request.params['telephoneResponsableTrafic']
if 'faxResponsableTrafic' in request.params:
faxResponsableTrafic = request.params['faxResponsableTrafic']
if 'courrielResponsableTrafic' in request.params:
courrielResponsableTrafic = request.params['courrielResponsableTrafic']
if 'remarque' in request.params:
remarque = request.params['remarque']
"""
if 'urgence' in request.params:
urgence = request.params['urgence']
if urgence == 'true':
urgence = True
elif urgence == 'false':
urgence = False
else:
urgence = None
"""
# Check date_debut, if less than 24h, urgence=true
urgence = False
if dateDebut != None and heureDebut != None:
date_time_str = str(dateDebut) + ' ' + str(heureDebut)
date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
now = datetime.datetime.now()
if date_time_obj >= now and date_time_obj <= now + timedelta(days=1):
urgence = True
if 'etat' in request.params:
etat = request.params['etat']
etat = int(etat) if etat != None and etat != '' else int(settings['perturbation_etat_attente_code'])
# If urgence == true, etat = accepté
if urgence == True:
etat = settings['perturbation_etat_acceptee_code']
# If urgence == False, Check role Trafic
else:
user_can_update_etat_perturbation_creation = Utils.user_can_update_etat_perturbation_creation(request, current_user_id)
# If not authorized, force default etat (en attente)
if not user_can_update_etat_perturbation_creation:
etat = int(settings['perturbation_etat_attente_code'])
if int(etat) == int(settings['perturbation_etat_acceptee_code']):
dateValidation = datetime.datetime.today().strftime('%Y-%m-%d')
utilisateurValidation = current_user_id
"""
if 'dateValidation' in request.params:
dateValidation = request.params['dateValidation']
if 'utilisateurValidation' in request.params:
utilisateurValidation = request.params['utilisateurValidation']
"""
if 'decision' in request.params:
decision = request.params['decision']
if 'dateDecision' in request.params:
dateDecision = request.params['dateDecision']
"""
if 'ajoutePar' in request.params:
ajoutePar = request.params['ajoutePar']
if 'dateAjout' in request.params:
dateAjout = request.params['dateAjout']
if 'modifiePar' in request.params:
modifiePar = request.params['modifiePar']
if 'dateModification' in request.params:
dateModification = request.params['dateModification']
if 'dateSuppression' in request.params:
dateSuppression = request.params['dateSuppression']
"""
if 'geometries_reperages' in request.params:
geometries_reperages = request.params['geometries_reperages']
#Read contacts à aviser
if 'contacts_a_aviser' in request.params:
contacts_a_aviser = request.params['contacts_a_aviser']
# Read fermeture params
if '_deviation' in request.params:
_deviation = request.params['_deviation']
if '_idResponsable' in request.params:
_idResponsable = request.params['_idResponsable']
# Read occupation params
if '_idResponsableRegulation' in request.params:
_idResponsableRegulation = request.params['_idResponsableRegulation']
if '_typeOccupation' in request.params:
_typeOccupation = request.params['_typeOccupation']
if '_typeRegulation' in request.params:
_typeRegulation = request.params['_typeRegulation']
if '_voiesCondamnees' in request.params:
_voiesCondamnees = request.params['_voiesCondamnees']
if '_largeurGabarit' in request.params:
_largeurGabarit = request.params['_largeurGabarit']
if '_hauteurGabarit' in request.params:
_hauteurGabarit = request.params['_hauteurGabarit']
if '_heurePointe' in request.params:
_heurePointe = request.params['_heurePointe']
if _heurePointe == 'true':
_heurePointe = True
elif _heurePointe == 'false':
_heurePointe = False
else:
_heurePointe = None
if '_weekEnd' in request.params:
_weekEnd = request.params['_weekEnd']
if _weekEnd == 'true':
_weekEnd = True
elif _weekEnd == 'false':
_weekEnd = False
else:
_weekEnd = None
# Read params Reperage
if '_idDeviation' in request.params and request.params['_idDeviation'] != '':
_idDeviation = request.params['_idDeviation']
if '_proprietaire' in request.params:
_proprietaire = request.params['_proprietaire']
if '_axe' in request.params:
_axe = request.params['_axe']
if '_sens' in request.params:
_sens = request.params['_sens']
if '_prDebut' in request.params:
_prDebut = request.params['_prDebut']
if '_prDebutDistance' in request.params:
_prDebutDistance = request.params['_prDebutDistance']
if '_prFin' in request.params:
_prFin = request.params['_prFin']
if '_prFinDistance' in request.params:
_prFinDistance = request.params['_prFinDistance']
if '_ecartd' in request.params:
_ecartd = request.params['_ecartd']
if '_ecartf' in request.params:
_ecartf = request.params['_ecartf']
if '_usageNeg' in request.params:
_usageNeg = request.params['_usageNeg']
if _usageNeg == 'true':
_usageNeg = True
elif _usageNeg == 'false':
_usageNeg = False
else:
_usageNeg = None
if '_fSurf' in request.params and request.params['_fSurf'] != '':
_fSurf = request.params['_fSurf']
if '_fLong' in request.params and request.params['_fLong'] != '':
_fLong = request.params['_fLong']
#Read params deviations
if 'geometries_deviations' in request.params and request.params['geometries_deviations'] != '':
geometries_deviations = request.params['geometries_deviations']
with transaction.manager:
perturbation_model = models.Perturbation(
id_evenement=idEvenement,
id_responsable_trafic=idResponsableTrafic,
type=type,
tranche_horaire=trancheHoraire,
description=description,
date_debut=dateDebut,
heure_debut=heureDebut,
date_fin=dateFin,
heure_fin=heureFin,
localisation=localisation,
nom_responsable_trafic=nomResponsableTrafic,
prenom_responsable_trafic=prenomResponsableTrafic,
mobile_responsable_trafic=mobileResponsableTrafic,
telephone_responsable_trafic=telephoneResponsableTrafic,
fax_responsable_trafic=faxResponsableTrafic,
courriel_responsable_trafic=courrielResponsableTrafic,
remarque=remarque,
urgence=urgence,
etat=etat,
date_validation=dateValidation,
id_utilisateur_validation=utilisateurValidation,
decision=decision,
date_decision=dateDecision,
id_utilisateur_ajout=current_user_id,
# date_ajout=dateAjout,
id_utilisateur_modification=current_user_id
# date_modification=dateModification,
# date_suppression=dateSuppression
)
request.dbsession.add(perturbation_model)
request.dbsession.flush()
max_perturb_id = perturbation_model.id
# Historiser les changements d'état
Utils.add_historique_etat_perturbation(request, current_user_id, max_perturb_id, etat)
# Contacts à aviser
if contacts_a_aviser != None:
json_contacts_a_aviser = json.loads(contacts_a_aviser)
for onecontactid in json_contacts_a_aviser:
contacts_a_aviser_ids_array.append(onecontactid)
avis_perturbation_model = models.AvisPerturbation(
id_perturbation=max_perturb_id,
id_contact=onecontactid)
request.dbsession.add(avis_perturbation_model)
# Related model
related_model = None
# Type perturbation : Fermeture
if int(type) == int(settings['fermeture_perturbation_id']):
related_model = models.Fermeture(
id_perturbation=max_perturb_id,
deviation=_deviation,
id_responsable=_idResponsable)
# Type perturbation : Occupation
elif int(type) == int(settings['occupation_perturbation_id']):
related_model = models.Occupation(
id_perturbation=max_perturb_id,
id_responsable_regulation=_idResponsableRegulation,
type_regulation=_typeRegulation,
voies_condamnees=_voiesCondamnees,
largeur_gabarit=_largeurGabarit,
hauteur_gabarit=_hauteurGabarit,
heure_pointe=_heurePointe,
week_end=_weekEnd,
type_occupation= _typeOccupation)
request.dbsession.add(related_model)
request.dbsession.flush()
# Geometries_reperages
reperages_list = []
if geometries_reperages != None:
json_geometries_reperages = json.loads(geometries_reperages)
for onegeojson in json_geometries_reperages:
# Geometry
if 'geometry' in onegeojson:
geometry = onegeojson['geometry']
if 'type' in geometry:
type_geom = geometry['type']
# Point
if type_geom == 'Point':
perturbation_point_model = models.PerturbationPoint(id_perturbation=max_perturb_id)
perturbation_point_model.set_json_geometry(str(geometry), settings['srid'])
request.dbsession.add(perturbation_point_model)
# Line
elif type_geom == 'LineString' or type_geom == 'MultiLineString' or type_geom == 'GeometryCollection':
perturbation_ligne_model = models.PerturbationLigne(id_perturbation=max_perturb_id)
perturbation_ligne_model.set_json_geometry(str(geometry), settings['srid'])
request.dbsession.add(perturbation_ligne_model)
if 'reperage' in onegeojson:
request.dbsession.flush()
reperage = onegeojson['reperage']
reperage_model = models.Reperage(
id_perturbation_ligne=perturbation_ligne_model.id,
id_deviation=reperage['idDeviation'],
proprietaire=reperage['proprietaire'],
axe=reperage['axe'],
sens=reperage['sens'],
pr_debut=reperage['prDebut'],
pr_debut_distance=reperage['prDebutDistance'],
pr_fin=reperage['prFin'],
pr_fin_distance=reperage['prFinDistance'],
ecartd=reperage['ecartd'],
ecartf=reperage['ecartf'],
usage_neg=reperage['usageNeg'],
f_surf=reperage['fSurf'],
f_long=reperage['fLong']
)
request.dbsession.add(reperage_model)
reperages_list.append(reperage_model)
# Geometries_deviations
if geometries_deviations != None:
json_geometries_deviations = json.loads(geometries_deviations)
for onegeojson in json_geometries_deviations:
deviation_model = models.Deviation(id_perturbation=max_perturb_id)
deviation_model.set_json_geometry(str(onegeojson), settings['srid'])
request.dbsession.add(deviation_model)
transaction.commit()
# Prepare mail to send
mail_dict = Utils.create_perturbation_mail_dict(request, perturbation_model, evenement_record,
_deviation)
# Reperages list
reperages_string = ''
for reperage_model in reperages_list:
reperages_string += '<td><p>{}</p></td><td><p>{}</p></td><td><p>{}</p></td><td><p>{}</p></td><td><p>{}</p></td><td><p>{}</p></td>'.format(
'???', reperage_model.axe, reperage_model.pr_debut, reperage_model.pr_debut_distance,
reperage_model.pr_fin, reperage_model.pr_fin_distance)
# Envoi email si fermeture d'urgence
# Envoi à la liste des personnes concernées par les fermetures d’urgence
if perturbation_model.urgence:
mails_contacts_mails_fermeture_urgence = Utils.get_mails_contacts_mails_fermeture_urgence(
request)
if mails_contacts_mails_fermeture_urgence and len(mails_contacts_mails_fermeture_urgence) > 0:
PTMailer.send_templated_mail(request, mails_contacts_mails_fermeture_urgence,
settings['mail_fermeture_urgence_subject'],
'email_templates:fermeture_urgence', mail_dict,
reperages_string)
# Envoi d’emails régulier lors d’une fermeture et occupation
contacts_a_aviser_mails_array = []
# Envoi à la liste des personnes sélectionnées dans le formulaire
if len(contacts_a_aviser_ids_array) > 0:
contacts_a_aviser_mails_array = Utils.get_contacts_mails_by_ids(request, contacts_a_aviser_ids_array)
# If Accepté → Envoi au créateur
if int(etat) == int(settings['perturbation_etat_acceptee_code']):
connected_user = LDAPQuery.get_connected_user(request)
mail_att_name = settings['ldap_user_attribute_mail']
if connected_user and mail_att_name in connected_user:
connected_user_mail = connected_user[mail_att_name]
if not connected_user_mail in contacts_a_aviser_mails_array:
contacts_a_aviser_mails_array.append(connected_user_mail)
# If En attente → Envoi à l’approbateur = rôle trafic
elif int(etat) == int(settings['perturbation_etat_attente_code']):
contacts_a_aviser_mails_array += Utils.get_mails_of_contacts_belonging_to_a_group(request, settings['ldap_trafic_group_name'])
#Delete duplicates from array
contacts_a_aviser_mails_array = list(dict.fromkeys(contacts_a_aviser_mails_array))
if contacts_a_aviser_mails_array and len(contacts_a_aviser_mails_array) > 0:
PTMailer.send_templated_mail(request, contacts_a_aviser_mails_array,
'FERMETURE' if int(perturbation_model.type) == int(settings['fermeture_perturbation_id']) else "OCCUPATION" if int(perturbation_model.type) == int(settings['occupation_perturbation_id']) else 'Info',
'email_templates:fermeture_occupation', mail_dict,
reperages_string)
# Envoi d’email en cas de SRB touché
# Envoi à la liste des personnes GMAR
evenement_pr_touche = Utils.check_evenement_pr_touche(request, idEvenement)
if evenement_pr_touche:
contacts_pr_touche = Utils.get_mails_contacts_pr_touche(request)
if contacts_pr_touche and len(contacts_pr_touche) > 0:
PTMailer.send_templated_mail(request, contacts_pr_touche,
settings['mail_srb_touche_subject'],
'email_templates:srb_touche', mail_dict,
reperages_string)
except HTTPForbidden as e:
raise HTTPForbidden()
except Exception as e:
# tm.abort()
request.dbsession.rollback()
log.error(str(e))
return {'error': 'true', 'code': 500, 'message': general_exception}
return {'message': 'Data successfully saved'}
########################################################
# Update perturbation edition
########################################################
@view_config(route_name='perturbation_edition', request_method='PUT', renderer='json')
@view_config(route_name='perturbation_edition_slash', request_method='PUT', renderer='json')
def update_perturbation_edition(request):
    """Update an existing perturbation (fermeture/occupation) from PUT params.

    Workflow:
      1. Authenticate via the ``auth_tkt`` cookie and check the connected
         user's permission to update this perturbation.
      2. Read every form parameter (perturbation fields, type-specific
         fermeture/occupation fields, reperage/deviation geometries,
         contacts to notify).
      3. Recompute ``urgence`` (start within the next 24h) and derive
         ``etat`` from it and from the user's rights.
      4. In one transaction: update the Perturbation row, replace its
         AvisPerturbation rows, update the related Fermeture or Occupation
         row, replace point/line geometries (with their Reperage rows) and
         the Deviation geometries.
      5. Send notification e-mails (urgent-closure list, state-change
         recipients, SRB-affected list).

    Returns a success dict, or ``{'error', 'code', 'message'}`` on failure.
    Raises HTTPForbidden when authentication or authorization fails.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        # Check authorization: the auth cookie must exist and resolve to a user id
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        current_user_id = int(current_user_id) if current_user_id else None
        if current_user_id is None:
            raise HTTPForbidden()
        evenement_record = None
        etat_updated = False
        urgence_updated = False
        contacts_a_aviser_ids_array = []
        # Default params value
        idPerturbation = None
        idEvenement = None
        idResponsableTrafic = None
        type = None
        trancheHoraire = None
        description = None
        dateDebut = None
        heureDebut = None
        dateFin = None
        heureFin = None
        localisation = None
        nomResponsableTrafic = None
        prenomResponsableTrafic = None
        mobileResponsableTrafic = None
        telephoneResponsableTrafic = None
        faxResponsableTrafic = None
        courrielResponsableTrafic = None
        remarque = None
        urgence = None
        etat = None
        dateValidation = None
        utilisateurValidation = None
        decision = None
        dateDecision = None
        # ajoutePar = None
        # dateAjout = None
        # modifiePar = None
        dateModification = None
        # dateSuppression = None
        geometries_reperages = None
        """Contacts à aviser"""
        contacts_a_aviser = None
        """Common"""
        _idPerturbation = None
        """Fermeture"""
        _deviation = None
        _idResponsable = None
        """Occupation"""
        _idResponsableRegulation = None
        _typeOccupation = None
        _typeRegulation = None
        _voiesCondamnees = None
        _largeurGabarit = None
        _hauteurGabarit = None
        _heurePointe = None
        _weekEnd = None
        """geometries_deviations"""
        geometries_deviations = None
        # Read perturbation params
        if 'idPerturbation' in request.params:
            idPerturbation = request.params['idPerturbation']
        if not idPerturbation:
            raise Exception('Id perturbation is null')
        # Check if the user has permission to update perturbation
        user_can_update_perturbation = Utils.user_can_update_perturbation(request, current_user_id, idPerturbation)
        if not user_can_update_perturbation:
            raise HTTPForbidden()
        if 'idEvenement' in request.params:
            idEvenement = request.params['idEvenement']
            evenement_record = request.dbsession.query(models.Evenement).filter(
                models.Evenement.id == idEvenement).first()
            if not evenement_record:
                raise Exception('Evenement not found')
        if 'idResponsableTrafic' in request.params:
            idResponsableTrafic = request.params['idResponsableTrafic']
        if 'type' in request.params:
            # NOTE: shadows the builtin `type` for the rest of this function
            type = request.params['type']
        if 'trancheHoraire' in request.params:
            trancheHoraire = request.params['trancheHoraire']
            if trancheHoraire == 'true':
                trancheHoraire = True
            elif trancheHoraire == 'false':
                trancheHoraire = False
            else:
                trancheHoraire = None
        if 'description' in request.params:
            description = request.params['description']
        if 'dateDebut' in request.params:
            dateDebut = request.params['dateDebut']
        if 'heureDebut' in request.params:
            heureDebut = request.params['heureDebut']
        if 'dateFin' in request.params:
            dateFin = request.params['dateFin']
        if 'heureFin' in request.params:
            heureFin = request.params['heureFin']
        if 'localisation' in request.params:
            localisation = request.params['localisation']
        if 'nomResponsableTrafic' in request.params:
            nomResponsableTrafic = request.params['nomResponsableTrafic']
        if 'prenomResponsableTrafic' in request.params:
            prenomResponsableTrafic = request.params['prenomResponsableTrafic']
        if 'mobileResponsableTrafic' in request.params:
            mobileResponsableTrafic = request.params['mobileResponsableTrafic']
        if 'telephoneResponsableTrafic' in request.params:
            telephoneResponsableTrafic = request.params['telephoneResponsableTrafic']
        if 'faxResponsableTrafic' in request.params:
            faxResponsableTrafic = request.params['faxResponsableTrafic']
        if 'courrielResponsableTrafic' in request.params:
            courrielResponsableTrafic = request.params['courrielResponsableTrafic']
        if 'remarque' in request.params:
            remarque = request.params['remarque']
        """
        if 'urgence' in request.params:
            urgence = request.params['urgence']
            if urgence == 'true':
                urgence = True
            elif urgence == 'false':
                urgence = False
            else:
                urgence = None
        """
        # Check date_debut, if less than 24h, urgence=true
        urgence = False
        if dateDebut != None and heureDebut != None:
            date_time_str = str(dateDebut) + ' ' + str(heureDebut)
            date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
            now = datetime.datetime.now()
            if date_time_obj >= now and date_time_obj <= now + timedelta(days=1):
                urgence = True
        if 'etat' in request.params:
            etat = request.params['etat']
        etat = int(etat) if etat != None and etat != '' else int(settings['perturbation_etat_attente_code'])
        # If urgence is true, force etat to 'accepted'
        if urgence == True:
            etat = settings['perturbation_etat_acceptee_code']
        # If urgence == False, Check role Trafic
        else:
            user_can_update_etat_perturbation = Utils.user_can_update_etat_perturbation(request, current_user_id, idPerturbation)
            # If not authorized, force default etat (en attente)
            if not user_can_update_etat_perturbation:
                etat = int(settings['perturbation_etat_attente_code'])
        if int(etat) == int(settings['perturbation_etat_acceptee_code']):
            dateValidation = datetime.datetime.today().strftime('%Y-%m-%d')
            utilisateurValidation = current_user_id
        """
        if 'dateValidation' in request.params:
            dateValidation = request.params['dateValidation']
        if 'utilisateurValidation' in request.params:
            utilisateurValidation = request.params['utilisateurValidation']
        """
        if 'decision' in request.params:
            decision = request.params['decision']
        if 'dateDecision' in request.params:
            dateDecision = request.params['dateDecision']
        """
        if 'ajoutePar' in request.params:
            ajoutePar = request.params['ajoutePar']
        if 'dateAjout' in request.params:
            dateAjout = request.params['dateAjout']
        if 'modifiePar' in request.params:
            modifiePar = request.params['modifiePar']
        if 'dateModification' in request.params:
            dateModification = request.params['dateModification']
        if 'dateSuppression' in request.params:
            dateSuppression = request.params['dateSuppression']
        """
        if 'geometries_reperages' in request.params:
            geometries_reperages = request.params['geometries_reperages']
        # Read contacts to notify
        if 'contacts_a_aviser' in request.params:
            contacts_a_aviser = request.params['contacts_a_aviser']
        # Read fermeture params
        if '_deviation' in request.params:
            _deviation = request.params['_deviation']
        if '_idResponsable' in request.params:
            _idResponsable = request.params['_idResponsable']
        # Read occupation params
        if '_idResponsableRegulation' in request.params:
            _idResponsableRegulation = request.params['_idResponsableRegulation']
        if '_typeOccupation' in request.params:
            _typeOccupation = request.params['_typeOccupation']
        if '_typeRegulation' in request.params:
            _typeRegulation = request.params['_typeRegulation']
        if '_voiesCondamnees' in request.params:
            _voiesCondamnees = request.params['_voiesCondamnees']
        if '_largeurGabarit' in request.params:
            _largeurGabarit = request.params['_largeurGabarit']
        if '_hauteurGabarit' in request.params:
            _hauteurGabarit = request.params['_hauteurGabarit']
        if '_heurePointe' in request.params:
            _heurePointe = request.params['_heurePointe']
            if _heurePointe == 'true':
                _heurePointe = True
            elif _heurePointe == 'false':
                _heurePointe = False
            else:
                _heurePointe = None
        if '_weekEnd' in request.params:
            _weekEnd = request.params['_weekEnd']
            # NOTE(review): unlike _heurePointe, the fallback here is False,
            # not None — presumably intentional; confirm.
            if str(_weekEnd) == 'true':
                _weekEnd = True
            elif str(_weekEnd) == 'false':
                _weekEnd = False
            else:
                _weekEnd = False
        # Read params deviations
        if 'geometries_deviations' in request.params and request.params['geometries_deviations'] != '':
            geometries_deviations = request.params['geometries_deviations']
        with transaction.manager:
            perturbation_query = request.dbsession.query(models.Perturbation).filter(
                models.Perturbation.id == idPerturbation)
            if perturbation_query.count() == 0:
                raise CustomError('{} with id {} not found'.format(models.Perturbation.__tablename__, idPerturbation))
            perturbation_record = perturbation_query.first()
            # If urgence == false, check etat
            if not urgence:
                # When the start date is modified, the state falls back to pending.
                if dateDebut is not None and perturbation_record.date_debut is not None and str(perturbation_record.date_debut) != dateDebut:
                    etat = settings['perturbation_etat_attente_code']
                # When the route (tracé) is modified, the state falls back to pending.
                if int(etat) != int(settings['perturbation_etat_attente_code']):
                    is_geometries_equal = Utils.compare_perturbation_geometries(request, idPerturbation, geometries_reperages)
                    if not is_geometries_equal:
                        etat = settings['perturbation_etat_attente_code']
            # Record state changes in the history
            etat_old_str = str(perturbation_record.etat) if perturbation_record.etat is not None else ''
            etat_new_str = str(etat) if etat is not None else ''
            if etat_old_str != etat_new_str:
                Utils.add_historique_etat_perturbation(request, current_user_id, idPerturbation, etat)
                etat_updated = True
            if perturbation_record.urgence != urgence:
                urgence_updated = True
            perturbation_record.id_evenement = idEvenement
            perturbation_record.id_responsable_trafic = idResponsableTrafic
            perturbation_record.type = type
            perturbation_record.tranche_horaire = trancheHoraire
            perturbation_record.description = description
            perturbation_record.date_debut = dateDebut
            perturbation_record.heure_debut = heureDebut
            perturbation_record.date_fin = dateFin
            perturbation_record.heure_fin = heureFin
            perturbation_record.localisation = localisation
            perturbation_record.nom_responsable_trafic = nomResponsableTrafic
            perturbation_record.prenom_responsable_trafic = prenomResponsableTrafic
            perturbation_record.mobile_responsable_trafic = mobileResponsableTrafic
            perturbation_record.telephone_responsable_trafic = telephoneResponsableTrafic
            perturbation_record.fax_responsable_trafic = faxResponsableTrafic
            perturbation_record.courriel_responsable_trafic = courrielResponsableTrafic
            perturbation_record.remarque = remarque
            perturbation_record.urgence = urgence
            perturbation_record.etat = etat
            perturbation_record.date_validation = dateValidation
            perturbation_record.id_utilisateur_validation = utilisateurValidation
            perturbation_record.decision = decision
            perturbation_record.date_decision = dateDecision
            # perturbation_record.id_utilisateur_ajout = ajoutePar
            # perturbation_record.date_ajout = dateAjout
            perturbation_record.id_utilisateur_modification = current_user_id
            # perturbation_record.date_modification = func.now()
            # perturbation_record.date_suppression = dateSuppression
            # Contacts to notify
            # Delete old avis perturbation
            request.dbsession.query(models.AvisPerturbation).filter(
                models.AvisPerturbation.id_perturbation == idPerturbation).delete(synchronize_session=False)
            # Add the contacts to notify
            if contacts_a_aviser != None:
                json_contacts_a_aviser = json.loads(contacts_a_aviser)
                for onecontactid in json_contacts_a_aviser:
                    contacts_a_aviser_ids_array.append(onecontactid)
                    avis_perturbation_model = models.AvisPerturbation(
                        id_perturbation=idPerturbation,
                        id_contact=onecontactid)
                    request.dbsession.add(avis_perturbation_model)
            # Perturbation type: Fermeture (closure)
            if int(type) == int(settings['fermeture_perturbation_id']):
                fermeture_query = request.dbsession.query(models.Fermeture).filter(
                    models.Fermeture.id_perturbation == idPerturbation)
                if fermeture_query.count() == 0:
                    raise CustomError(
                        '{} with id_perturbation {} not found'.format(models.Fermeture.__tablename__, idPerturbation))
                fermeture_record = fermeture_query.first()
                fermeture_record.id_perturbation = idPerturbation
                fermeture_record.deviation = _deviation
                fermeture_record.id_responsable = _idResponsable
            # Perturbation type: Occupation
            elif int(type) == int(settings['occupation_perturbation_id']):
                occupation_query = request.dbsession.query(models.Occupation).filter(
                    models.Occupation.id_perturbation == idPerturbation)
                if occupation_query.count() == 0:
                    raise CustomError(
                        '{} with id perturbation {} not found'.format(models.Occupation.__tablename__, idPerturbation))
                occupation_record = occupation_query.first()
                occupation_record.id_perturbation = idPerturbation
                occupation_record.id_responsable_regulation = _idResponsableRegulation
                occupation_record.type_regulation = _typeRegulation
                occupation_record.voies_condamnees = _voiesCondamnees
                occupation_record.largeur_gabarit = _largeurGabarit
                occupation_record.hauteur_gabarit = _hauteurGabarit
                occupation_record.heure_pointe = _heurePointe
                occupation_record.week_end = _weekEnd
                occupation_record.type_occupation = _typeOccupation
            # Geometries_reperages
            # Get perturbations id having lines
            perturbation_ligne_ids = []
            for item in request.dbsession.query(models.PerturbationLigne.id).filter(
                    models.PerturbationLigne.id_perturbation == idPerturbation).all():
                perturbation_ligne_ids.append(item.id)
            # Delete old geometries and reperages
            request.dbsession.query(models.Reperage).filter(
                models.Reperage.id_perturbation_ligne.in_(perturbation_ligne_ids)).delete(synchronize_session=False)
            request.dbsession.query(models.PerturbationPoint).filter(
                models.PerturbationPoint.id_perturbation == idPerturbation).delete()
            request.dbsession.query(models.PerturbationLigne).filter(
                models.PerturbationLigne.id_perturbation == idPerturbation).delete()
            # Add new geometries
            reperages_list = []
            if geometries_reperages != None:
                json_geometries_reperages = json.loads(geometries_reperages)
                for onegeojson in json_geometries_reperages:
                    # Geometry
                    if 'geometry' in onegeojson:
                        geometry = onegeojson['geometry']
                        if 'type' in geometry:
                            type_geom = geometry['type']
                            # Point
                            if type_geom == 'Point':
                                perturbation_point_model = models.PerturbationPoint(id_perturbation=idPerturbation)
                                perturbation_point_model.set_json_geometry(str(geometry), settings['srid'])
                                request.dbsession.add(perturbation_point_model)
                            # Line
                            elif type_geom == 'LineString' or type_geom == 'MultiLineString' or type_geom == 'GeometryCollection':
                                perturbation_ligne_model = models.PerturbationLigne(id_perturbation=idPerturbation)
                                perturbation_ligne_model.set_json_geometry(str(geometry), settings['srid'])
                                request.dbsession.add(perturbation_ligne_model)
                                if 'reperage' in onegeojson:
                                    # Flush so the new line row gets its id for the FK below
                                    request.dbsession.flush()
                                    reperage = onegeojson['reperage']
                                    reperage_model = models.Reperage(
                                        id_perturbation_ligne=perturbation_ligne_model.id,
                                        id_deviation=reperage['idDeviation'],
                                        proprietaire=reperage['proprietaire'],
                                        axe=reperage['axe'],
                                        sens=reperage['sens'],
                                        pr_debut=reperage['prDebut'],
                                        pr_debut_distance=reperage['prDebutDistance'],
                                        pr_fin=reperage['prFin'],
                                        pr_fin_distance=reperage['prFinDistance'],
                                        ecartd=reperage['ecartd'],
                                        ecartf=reperage['ecartf'],
                                        usage_neg=reperage['usageNeg'],
                                        f_surf=reperage['fSurf'],
                                        f_long=reperage['fLong']
                                    )
                                    request.dbsession.add(reperage_model)
                                    reperages_list.append(reperage_model)
            # Geometries_deviations
            request.dbsession.query(models.Deviation).filter(models.Deviation.id_perturbation == idPerturbation).delete(synchronize_session=False)
            if geometries_deviations != None:
                json_geometries_deviations = json.loads(geometries_deviations)
                for onegeojson in json_geometries_deviations:
                    deviation_model = models.Deviation(id_perturbation=idPerturbation)
                    deviation_model.set_json_geometry(str(onegeojson), settings['srid'])
                    request.dbsession.add(deviation_model)
            # Commit transaction
            transaction.commit()
        # Prepare mail to send
        mail_dict = Utils.create_perturbation_mail_dict(request, perturbation_record, evenement_record,
                                                        _deviation)
        # Reperages list
        reperages_string = ''
        for reperage_model in reperages_list:
            # NOTE(review): first cell is a literal '???' placeholder — confirm intended
            reperages_string += '<td><p>{}</p></td><td><p>{}</p></td><td><p>{}</p></td><td><p>{}</p></td><td><p>{}</p></td><td><p>{}</p></td>'.format(
                '???', reperage_model.axe, reperage_model.pr_debut, reperage_model.pr_debut_distance,
                reperage_model.pr_fin, reperage_model.pr_fin_distance)
        # Send e-mail for an urgent closure
        # Sent to the list of people concerned by urgent closures
        if urgence_updated and perturbation_record.urgence:
            mails_contacts_mails_fermeture_urgence = Utils.get_mails_contacts_mails_fermeture_urgence(
                request)
            if mails_contacts_mails_fermeture_urgence and len(mails_contacts_mails_fermeture_urgence) > 0:
                PTMailer.send_templated_mail(request, mails_contacts_mails_fermeture_urgence,
                                             settings['mail_fermeture_urgence_subject'],
                                             'email_templates:fermeture_urgence', mail_dict,
                                             reperages_string)
        # Regular e-mails for a closure or occupation
        contacts_a_aviser_mails_array = []
        # Sent to the people selected in the form
        if len(contacts_a_aviser_ids_array) > 0:
            contacts_a_aviser_mails_array = Utils.get_contacts_mails_by_ids(request, contacts_a_aviser_ids_array)
        # If accepted -> also notify the creator
        if etat_updated and int(etat) == int(settings['perturbation_etat_acceptee_code']):
            connected_user = LDAPQuery.get_connected_user(request)
            mail_att_name = settings['ldap_user_attribute_mail']
            if connected_user and mail_att_name in connected_user:
                connected_user_mail = connected_user[mail_att_name]
                if not connected_user_mail in contacts_a_aviser_mails_array:
                    contacts_a_aviser_mails_array.append(connected_user_mail)
        # If pending -> notify the approver (trafic role)
        elif etat_updated and int(etat) == int(settings['perturbation_etat_attente_code']):
            contacts_a_aviser_mails_array += Utils.get_mails_of_contacts_belonging_to_a_group(request, settings[
                'ldap_trafic_group_name'])
        # Delete duplicates from array
        contacts_a_aviser_mails_array = list(dict.fromkeys(contacts_a_aviser_mails_array))
        if contacts_a_aviser_mails_array and len(contacts_a_aviser_mails_array) > 0:
            PTMailer.send_templated_mail(request, contacts_a_aviser_mails_array,
                                         'FERMETURE' if int(perturbation_record.type) == int(
                                             settings['fermeture_perturbation_id']) else "OCCUPATION" if int(
                                             perturbation_record.type) == int(
                                             settings['occupation_perturbation_id']) else 'Info',
                                         'email_templates:fermeture_occupation', mail_dict,
                                         reperages_string)
        # Send e-mail when an SRB is affected
        # Sent to the GMAR people list
        evenement_pr_touche = Utils.check_evenement_pr_touche(request, idEvenement)
        if evenement_pr_touche:
            contacts_pr_touche = Utils.get_mails_contacts_pr_touche(request)
            if contacts_pr_touche and len(contacts_pr_touche) > 0:
                PTMailer.send_templated_mail(request, contacts_pr_touche,
                                             settings['mail_srb_touche_subject'],
                                             'email_templates:srb_touche', mail_dict,
                                             reperages_string)
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except CustomError as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    except Exception as e:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Etat perturbation by id view
########################################################
@view_config(route_name='etat_perturbation_by_id', request_method='GET', renderer='json')
def etat_perturbation_by_id_view(request):
    """Return the EtatPerturbation record matching the id URL parameter, as JSON."""
    try:
        app_settings = request.registry.settings
        # Point the session at the configured schema before querying.
        request.dbsession.execute('set search_path to ' + app_settings['schema_name'])
        record = request.dbsession.query(models.EtatPerturbation).filter(
            models.EtatPerturbation.id == request.matchdict['id']).first()
        if record is None:
            raise Exception(id_not_found_exception)
    except Exception as e:
        log.error(str(e))
        message = id_not_found_exception if str(e) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return record
########################################################
# Etats perturbations view
########################################################
@view_config(route_name='etats_perturbations', request_method='GET', renderer='json')
@view_config(route_name='etats_perturbations_slash', request_method='GET', renderer='json')
def etats_perturbations_view(request):
    """Return every EtatPerturbation record as a JSON list."""
    try:
        app_settings = request.registry.settings
        # Point the session at the configured schema before querying.
        request.dbsession.execute('set search_path to ' + app_settings['schema_name'])
        records = request.dbsession.query(models.EtatPerturbation).all()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return records
########################################################
# Perturbations impression by id
########################################################
@view_config(route_name='perturbation_impression_by_id', request_method='GET', renderer='json')
def perturbation_impression_by_id_view(request):
    """Return the formatted PerturbationImpression record for the id URL parameter.

    Requires an auth_tkt cookie and read permission on the perturbation;
    raises HTTPForbidden otherwise.
    """
    try:
        app_settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + app_settings['schema_name'])
        # Authorization: the auth cookie is mandatory.
        if not request.cookies.get('auth_tkt', default=None):
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        perturbation_id = request.matchdict['id']
        # The connected user must be allowed to read this perturbation.
        if not Utils.user_can_read_perturbation(request, current_user_id, perturbation_id):
            raise HTTPForbidden()
        query = request.dbsession.query(models.PerturbationImpression).filter(
            models.PerturbationImpression.id == perturbation_id).first()
        if query is None:
            raise Exception(id_not_found_exception)
    except HTTPForbidden:
        raise HTTPForbidden()
    except Exception as e:
        log.error(str(e))
        message = id_not_found_exception if str(e) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return query.format()
########################################################
# Destinataire facturation by id view
########################################################
@view_config(route_name='destinataire_facturation_by_id', request_method='GET', renderer='json')
def destinataire_facturation_by_id_view(request):
    """Return the DestinataireFacturation record matching the id URL parameter."""
    try:
        app_settings = request.registry.settings
        # Point the session at the configured schema before querying.
        request.dbsession.execute('set search_path to ' + app_settings['schema_name'])
        record = request.dbsession.query(models.DestinataireFacturation).filter(
            models.DestinataireFacturation.id == request.matchdict['id']).first()
        if record is None:
            raise Exception(id_not_found_exception)
    except Exception as e:
        log.error(str(e))
        message = id_not_found_exception if str(e) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return record
########################################################
# Destinataires facturation view
########################################################
@view_config(route_name='destinataires_facturation', request_method='GET', renderer='json')
@view_config(route_name='destinataires_facturation_slash', request_method='GET', renderer='json')
def destinataires_facturation_view(request):
    """Return every DestinataireFacturation record as a JSON list."""
    try:
        app_settings = request.registry.settings
        # Point the session at the configured schema before querying.
        request.dbsession.execute('set search_path to ' + app_settings['schema_name'])
        records = request.dbsession.query(models.DestinataireFacturation).all()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return records
########################################################
# Categorie chantier by id view
########################################################
@view_config(route_name='categorie_chantier_by_id', request_method='GET', renderer='json')
def categorie_chantier_by_id_view(request):
    """Return the CategorieChantier record matching the id URL parameter."""
    try:
        app_settings = request.registry.settings
        # Point the session at the configured schema before querying.
        request.dbsession.execute('set search_path to ' + app_settings['schema_name'])
        record = request.dbsession.query(models.CategorieChantier).filter(
            models.CategorieChantier.id == request.matchdict['id']).first()
        if record is None:
            raise Exception(id_not_found_exception)
    except Exception as e:
        log.error(str(e))
        message = id_not_found_exception if str(e) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return record
########################################################
# Categories chantiers view
########################################################
@view_config(route_name='categories_chantiers', request_method='GET', renderer='json')
@view_config(route_name='categories_chantiers_slash', request_method='GET', renderer='json')
def categories_chantiers_view(request):
    """Return every CategorieChantier record as a JSON list."""
    try:
        app_settings = request.registry.settings
        # Point the session at the configured schema before querying.
        request.dbsession.execute('set search_path to ' + app_settings['schema_name'])
        records = request.dbsession.query(models.CategorieChantier).all()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return records
########################################################
# Type reperage by id view
########################################################
@view_config(route_name='type_reperage_by_id', request_method='GET', renderer='json')
def type_reperage_by_id_view(request):
    """Return the TypeReperage matching the ``id`` URL segment."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        record = session.query(models.TypeReperage).filter(
            models.TypeReperage.id == request.matchdict['id']).first()
        if record is None:
            raise Exception(id_not_found_exception)
    except Exception as exc:
        log.error(str(exc))
        message = id_not_found_exception if str(exc) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return record
########################################################
# Types reperages view
########################################################
@view_config(route_name='types_reperages', request_method='GET', renderer='json')
@view_config(route_name='types_reperages_slash', request_method='GET', renderer='json')
def types_reperages_view(request):
    """Return every TypeReperage row as a JSON list."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        records = session.query(models.TypeReperage).all()
    except Exception as exc:
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return records
########################################################
# Contact by id view
########################################################
@view_config(route_name='contact_by_id', request_method='GET', renderer='json')
def contact_by_id_view(request):
    """Return the Contact matching the ``id`` URL segment."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        record = session.query(models.Contact).filter(
            models.Contact.id == request.matchdict['id']).first()
        if record is None:
            raise Exception(id_not_found_exception)
    except Exception as exc:
        log.error(str(exc))
        message = id_not_found_exception if str(exc) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return record
########################################################
# Contacts view
########################################################
@view_config(route_name='contacts', request_method='GET', renderer='json')
@view_config(route_name='contacts_slash', request_method='GET', renderer='json')
def contacts_view(request):
    """Return every Contact row as a JSON list."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        records = session.query(models.Contact).all()
    except Exception as exc:
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return records
########################################################
# Add Contact view
########################################################
@view_config(route_name='contacts', request_method='POST', renderer='json')
@view_config(route_name='contacts_slash', request_method='POST', renderer='json')
def add_contact_view(request):
    """Create a contact from request params.

    Requires an ``auth_tkt`` cookie and, when a connected user id is known,
    the ``ajouter_contact`` permission. Unless ``forcerAjout`` is true, the
    view refuses to create a duplicate of an existing contact.
    """
    try:
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        current_user_id = Utils.get_connected_user_id(request)
        current_user_id = int(current_user_id) if current_user_id else None
        # Check if the user has permission to add contact.
        # FIX: the original combined the two clauses with the Python `and`
        # operator, which makes SQLAlchemy silently ignore the permission
        # clause (only the first expression is used); the conditions must be
        # combined with and_().
        if current_user_id:
            query_permission = request.dbsession.query(models.AutorisationFonction).filter(
                and_(models.AutorisationFonction.id_utilisateur == current_user_id,
                     models.AutorisationFonction.ajouter_contact == True)).first()
            if not query_permission:
                raise HTTPForbidden()
        # Read params (None when absent)
        nom = request.params.get('nom')
        prenom = request.params.get('prenom')
        idOrganisme = request.params.get('idOrganisme')
        mobile = request.params.get('mobile')
        telephone = request.params.get('telephone')
        courriel = request.params.get('courriel')
        login = request.params.get('login')
        forcerAjout = request.params.get('forcerAjout')
        if forcerAjout == 'true':
            forcerAjout = True
        elif forcerAjout == 'false':
            forcerAjout = False
        # Check if contact already exists
        if not forcerAjout:
            query = request.dbsession.query(models.Contact).filter(and_(
                func.lower(models.Contact.prenom) == func.lower(prenom),
                func.lower(models.Contact.nom) == func.lower(nom),
                models.Contact.mobile == mobile,
                func.lower(models.Contact.courriel) == func.lower(courriel),
                func.lower(models.Contact.login) == func.lower(login))).all()
            if len(query) > 0:
                return {'error': 'true', 'code': 500, 'message': 'Contact already exists'}
        with transaction.manager:
            model = models.Contact(
                id_organisme=idOrganisme,
                login=login,
                nom=nom,
                prenom=prenom,
                telephone=telephone,
                mobile=mobile,
                courriel=courriel)
            request.dbsession.add(model)
            transaction.commit()
    except HTTPForbidden:
        # Re-raise the original exception (the original code raised a fresh
        # HTTPForbidden(), needlessly discarding the traceback).
        raise
    except Exception as e:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Update Contact view
########################################################
@view_config(route_name='contacts', request_method='PUT', renderer='json')
@view_config(route_name='contacts_slash', request_method='PUT', renderer='json')
def update_contact_view(request):
    """Update the contact identified by the ``id`` request param.

    Requires an ``auth_tkt`` cookie and the ``modifier_contact`` permission.

    Fixes over the original:
    - the permission filter joined its clauses with the Python ``and``
      operator, which makes SQLAlchemy drop the permission clause; it now
      uses ``and_()``;
    - the ``mobile`` param was read but never written to the record;
    - the settings / search_path setup was executed twice.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        current_user_id = int(current_user_id) if current_user_id else None
        if current_user_id is None:
            raise HTTPForbidden()
        # Check if the user has permission to modify contact
        query_permission = request.dbsession.query(models.AutorisationFonction).filter(
            and_(models.AutorisationFonction.id_utilisateur == current_user_id,
                 models.AutorisationFonction.modifier_contact == True)).first()
        if not query_permission:
            raise HTTPForbidden()
        # Read params (None when absent; an empty id is treated as missing)
        id = request.params.get('id')
        if id == '':
            id = None
        nom = request.params.get('nom')
        prenom = request.params.get('prenom')
        idOrganisme = request.params.get('idOrganisme')
        mobile = request.params.get('mobile')
        telephone = request.params.get('telephone')
        courriel = request.params.get('courriel')
        login = request.params.get('login')
        with transaction.manager:
            contact_query = request.dbsession.query(models.Contact).filter(models.Contact.id == id)
            if contact_query.count() > 0:
                contact_record = contact_query.first()
                contact_record.id_organisme = idOrganisme
                contact_record.login = login
                contact_record.nom = nom
                contact_record.prenom = prenom
                contact_record.telephone = telephone
                contact_record.mobile = mobile  # was read but never saved in the original
                contact_record.courriel = courriel
                transaction.commit()
            else:
                raise Exception(id_not_found_exception)
    except Exception as e:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Delete contact by id view
########################################################
@view_config(route_name='contact_by_id', request_method='DELETE', renderer='json')
def delete_contact_by_id_view(request):
    """Delete the contact matching the ``id`` URL segment.

    Requires an ``auth_tkt`` cookie and the ``supprimer_contact`` permission.
    FIX: the permission filter joined its clauses with the Python ``and``
    operator, which makes SQLAlchemy ignore the permission clause; it now
    uses ``and_()``.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        current_user_id = int(current_user_id) if current_user_id else None
        if current_user_id is None:
            raise HTTPForbidden()
        # Check if the user has permission to delete contact
        query_permission = request.dbsession.query(models.AutorisationFonction).filter(
            and_(models.AutorisationFonction.id_utilisateur == current_user_id,
                 models.AutorisationFonction.supprimer_contact == True)).first()
        if not query_permission:
            raise HTTPForbidden()
        id = request.matchdict['id']
        query = request.dbsession.query(models.Contact)
        contact = query.filter(models.Contact.id == id).first()
        if not contact:
            raise Exception(id_not_found_exception)
        with transaction.manager:
            request.dbsession.delete(contact)
            # Commit transaction
            transaction.commit()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Contact having login
########################################################
@view_config(route_name='contacts_having_login', request_method='GET', renderer='json')
@view_config(route_name='contacts_having_login_slash', request_method='GET', renderer='json')
def contacts_having_login_view(request):
    """List contacts that have a login, with organisme name, entites and roles."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        rows = session.query(models.Contact, models.Organisme).filter(
            models.Contact.id_organisme == models.Organisme.id).filter(
            models.Contact.login.isnot(None)).all()
        result = []
        for contact, organisme in rows:
            item = contact.format()
            item['nom_organisme'] = organisme.nom
            # Entites linked to this contact
            entite_rows = session.query(models.LienContactEntite, models.Entite).filter(
                models.LienContactEntite.id_contact == contact.id).filter(
                models.LienContactEntite.id_entite == models.Entite.id).all()
            item['entites'] = [entite.nom for _lien, entite in entite_rows]
            # Roles (fonctions) held by this contact
            role_rows = session.query(models.FonctionContact.fonction).filter(
                models.FonctionContact.id_contact == contact.id).all()
            item['roles'] = [row.fonction for row in role_rows]
            result.append(item)
    except Exception as exc:
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return result
########################################################
# Contacts by entite view
########################################################
@view_config(route_name='contacts_entite', request_method='GET', renderer='json')
@view_config(route_name='contacts_entite_slash', request_method='GET', renderer='json')
def contacts_entite_view(request):
    """List the contacts linked to the entite given by the ``idEntite`` param."""
    entite_err_msg = 'idEntite parameter is empty'
    result = []
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        idEntite = request.params.get('idEntite')
        if idEntite is None:
            raise Exception(entite_err_msg)
        rows = session.query(models.Contact, models.LienContactEntite).filter(
            models.Contact.id == models.LienContactEntite.id_contact).filter(
            models.LienContactEntite.id_entite == idEntite).all()
        result = [contact.format() for contact, _lien in rows]
    except Exception as exc:
        log.error(str(exc))
        message = entite_err_msg if str(exc) == entite_err_msg else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return result
########################################################
# Get Contact_potentiel_avis_perturbation view
########################################################
@view_config(route_name='contacts_potentiels_avis_perturbation', request_method='GET', renderer='json')
@view_config(route_name='contacts_potentiels_avis_perturbation_slash', request_method='GET', renderer='json')
def contact_potentiel_avis_perturbation_view(request):
    """List ContactPotentielAvisPerturbation rows with contact and organisme names."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        rows = session.query(
            models.ContactPotentielAvisPerturbation, models.Contact, models.Organisme).filter(
            models.ContactPotentielAvisPerturbation.id_contact == models.Contact.id).filter(
            models.Contact.id_organisme == models.Organisme.id).all()
        result = [cp.format(contact.nom, contact.prenom, organisme.nom)
                  for cp, contact, organisme in rows]
    except Exception as exc:
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return result
########################################################
# Add Contact_potentiel_avis_perturbation view
########################################################
@view_config(route_name='contacts_potentiels_avis_perturbation', request_method='POST', renderer='json')
@view_config(route_name='contacts_potentiels_avis_perturbation_slash', request_method='POST', renderer='json')
def add_contact_potentiel_avis_perturbation_view(request):
    """Create a ContactPotentielAvisPerturbation row from request params."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        params = request.params
        # Read params (None when absent); the envoi flags arrive as strings
        idEntite = params.get('idEntite')
        idContact = params.get('idContact')
        envoiAutoOccupation = params.get('envoiAutoOccupation')
        if envoiAutoOccupation == 'true':
            envoiAutoOccupation = True
        elif envoiAutoOccupation == 'false':
            envoiAutoOccupation = False
        envoiAutoFermeture = params.get('envoiAutoFermeture')
        if envoiAutoFermeture == 'true':
            envoiAutoFermeture = True
        elif envoiAutoFermeture == 'false':
            envoiAutoFermeture = False
        with transaction.manager:
            session.add(models.ContactPotentielAvisPerturbation(
                id_entite=idEntite,
                id_contact=idContact,
                envoi_auto_occupation=envoiAutoOccupation,
                envoi_auto_fermeture=envoiAutoFermeture))
            transaction.commit()
    except Exception as exc:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Update Contact_potentiel_avis_perturbation view
########################################################
@view_config(route_name='contacts_potentiels_avis_perturbation', request_method='PUT', renderer='json')
@view_config(route_name='contacts_potentiels_avis_perturbation_slash', request_method='PUT', renderer='json')
def update_contact_potentiel_avis_perturbation_view(request):
    """Update a ContactPotentielAvisPerturbation row from request params."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        params = request.params
        # Read params (None when absent; an empty id is treated as missing)
        id = params.get('id')
        if id == '':
            id = None
        idEntite = params.get('idEntite')
        idContact = params.get('idContact')
        envoiAutoOccupation = params.get('envoiAutoOccupation')
        if envoiAutoOccupation == 'true':
            envoiAutoOccupation = True
        elif envoiAutoOccupation == 'false':
            envoiAutoOccupation = False
        envoiAutoFermeture = params.get('envoiAutoFermeture')
        if envoiAutoFermeture == 'true':
            envoiAutoFermeture = True
        elif envoiAutoFermeture == 'false':
            envoiAutoFermeture = False
        with transaction.manager:
            row_query = session.query(models.ContactPotentielAvisPerturbation).filter(
                models.ContactPotentielAvisPerturbation.id == id)
            if row_query.count() > 0:
                row = row_query.first()
                row.id_entite = idEntite
                row.id_contact = idContact
                row.envoi_auto_occupation = envoiAutoOccupation
                row.envoi_auto_fermeture = envoiAutoFermeture
                transaction.commit()
            else:
                raise Exception(id_not_found_exception)
    except Exception as exc:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(exc))
        message = id_not_found_exception if str(exc) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return {'message': 'Data successfully saved'}
########################################################
# Delete Contact_potentiel_avis_perturbation by id view
########################################################
@view_config(route_name='contacts_potentiels_avis_perturbation_by_id', request_method='DELETE', renderer='json')
def delete_contact_potentiels_avis_perturbation_by_id_view(request):
    """Delete the ContactPotentielAvisPerturbation matching the ``id`` URL segment."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        record = session.query(models.ContactPotentielAvisPerturbation).filter(
            models.ContactPotentielAvisPerturbation.id == request.matchdict['id']).first()
        if record is None:
            raise Exception(id_not_found_exception)
        with transaction.manager:
            session.delete(record)
            # Commit transaction
            transaction.commit()
    except Exception as exc:
        log.error(str(exc))
        message = id_not_found_exception if str(exc) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return {'message': 'Data successfully saved'}
########################################################
# Get Contact_avis_fermeture_urgence view
########################################################
@view_config(route_name='contacts_avis_fermeture_urgence', request_method='GET', renderer='json')
@view_config(route_name='contacts_avis_fermeture_urgence_slash', request_method='GET', renderer='json')
def contact_avis_fermeture_urgence_view(request):
    """List ContactAvisFermetureUrgence rows with contact and organisme names."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        rows = session.query(
            models.ContactAvisFermetureUrgence, models.Contact, models.Organisme).filter(
            models.ContactAvisFermetureUrgence.id_contact == models.Contact.id).filter(
            models.Contact.id_organisme == models.Organisme.id).all()
        result = [cp.format(contact.nom, contact.prenom, organisme.nom)
                  for cp, contact, organisme in rows]
    except Exception as exc:
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return result
########################################################
# Add Contact_avis_fermeture_urgence view
########################################################
@view_config(route_name='contacts_avis_fermeture_urgence', request_method='POST', renderer='json')
@view_config(route_name='contacts_avis_fermeture_urgence_slash', request_method='POST', renderer='json')
def add_avis_fermeture_urgence_view(request):
    """Create a ContactAvisFermetureUrgence row unless one already references the contact."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        idContact = request.params.get('idContact')
        # A duplicate is treated as a no-op success
        existing = session.query(models.ContactAvisFermetureUrgence).filter(
            models.ContactAvisFermetureUrgence.id_contact == idContact)
        if existing.count() > 0:
            return {'message': 'Data successfully saved'}
        with transaction.manager:
            session.add(models.ContactAvisFermetureUrgence(id_contact=idContact))
            transaction.commit()
    except Exception as exc:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Update Contact_avis_fermeture_urgence view
########################################################
@view_config(route_name='contacts_avis_fermeture_urgence', request_method='PUT', renderer='json')
@view_config(route_name='contacts_avis_fermeture_urgence_slash', request_method='PUT', renderer='json')
def update_avis_fermeture_urgence_view(request):
    """Update the contact referenced by a ContactAvisFermetureUrgence row."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        # Read params (None when absent; an empty id is treated as missing)
        id = request.params.get('id')
        if id == '':
            id = None
        idContact = request.params.get('idContact')
        with transaction.manager:
            row_query = session.query(models.ContactAvisFermetureUrgence).filter(
                models.ContactAvisFermetureUrgence.id == id)
            if row_query.count() > 0:
                row = row_query.first()
                row.id_contact = idContact
                transaction.commit()
            else:
                raise Exception(id_not_found_exception)
    except Exception as exc:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(exc))
        message = id_not_found_exception if str(exc) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return {'message': 'Data successfully saved'}
########################################################
# Delete contacts_avis_fermeture_urgence by id view
########################################################
@view_config(route_name='contacts_avis_fermeture_urgence_by_id', request_method='DELETE', renderer='json')
def delete_avis_fermeture_urgence_by_id_view(request):
    """Delete the ContactAvisFermetureUrgence matching the ``id`` URL segment."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        record = session.query(models.ContactAvisFermetureUrgence).filter(
            models.ContactAvisFermetureUrgence.id == request.matchdict['id']).first()
        if record is None:
            raise Exception(id_not_found_exception)
        with transaction.manager:
            session.delete(record)
            # Commit transaction
            transaction.commit()
    except Exception as exc:
        log.error(str(exc))
        message = id_not_found_exception if str(exc) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return {'message': 'Data successfully saved'}
########################################################
# Get contact_avis_pr_touche view
########################################################
@view_config(route_name='contact_avis_pr_touche', request_method='GET', renderer='json')
@view_config(route_name='contact_avis_pr_touche_slash', request_method='GET', renderer='json')
def contact_avis_pr_touche_view(request):
    """List ContactAvisPrTouche rows with contact and organisme names."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        rows = session.query(
            models.ContactAvisPrTouche, models.Contact, models.Organisme).filter(
            models.ContactAvisPrTouche.id_contact == models.Contact.id).filter(
            models.Contact.id_organisme == models.Organisme.id).all()
        result = [cp.format(contact.nom, contact.prenom, organisme.nom)
                  for cp, contact, organisme in rows]
    except Exception as exc:
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return result
########################################################
# Add Contact_avis_pr_touche view
########################################################
@view_config(route_name='contact_avis_pr_touche', request_method='POST', renderer='json')
@view_config(route_name='contact_avis_pr_touche_slash', request_method='POST', renderer='json')
def add_contact_avis_pr_touche_view(request):
    """Create a ContactAvisPrTouche row unless one already references the contact."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        idContact = request.params.get('idContact')
        # A duplicate is treated as a no-op success
        existing = session.query(models.ContactAvisPrTouche).filter(
            models.ContactAvisPrTouche.id_contact == idContact)
        if existing.count() > 0:
            return {'message': 'Data successfully saved'}
        with transaction.manager:
            session.add(models.ContactAvisPrTouche(id_contact=idContact))
            transaction.commit()
    except Exception as exc:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Update Contact_avis_pr_touche view
########################################################
@view_config(route_name='contact_avis_pr_touche', request_method='PUT', renderer='json')
@view_config(route_name='contact_avis_pr_touche_slash', request_method='PUT', renderer='json')
def update_contact_avis_pr_touche_view(request):
    """Update the contact referenced by a ContactAvisPrTouche row.

    FIX: this function was named ``update_avis_fermeture_urgence_view``,
    duplicating the fermeture-urgence PUT view defined earlier in this
    module. The second definition shadowed the first at module level, which
    can prevent the earlier view from being registered when the module is
    scanned. Renamed to match this view's route; routes are bound by
    ``route_name``, so no caller is affected.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        # Default params value
        id = None
        idContact = None
        # Read params
        if ('id' in request.params and request.params['id'] != ''):
            id = request.params['id']
        if ('idContact' in request.params):
            idContact = request.params['idContact']
        with transaction.manager:
            contact_query = request.dbsession.query(models.ContactAvisPrTouche).filter(
                models.ContactAvisPrTouche.id == id)
            if contact_query.count() > 0:
                contact_record = contact_query.first()
                contact_record.id_contact = idContact
                transaction.commit()
            else:
                raise Exception(id_not_found_exception)
    except Exception as e:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Delete Contact_avis_pr_touche_by_id by id view
########################################################
@view_config(route_name='contact_avis_pr_touche_by_id', request_method='DELETE', renderer='json')
def delete_contact_avis_pr_touche_by_id_view(request):
    """Delete the ContactAvisPrTouche matching the ``id`` URL segment."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        record = session.query(models.ContactAvisPrTouche).filter(
            models.ContactAvisPrTouche.id == request.matchdict['id']).first()
        if record is None:
            raise Exception(id_not_found_exception)
        with transaction.manager:
            session.delete(record)
            # Commit transaction
            transaction.commit()
    except Exception as exc:
        log.error(str(exc))
        message = id_not_found_exception if str(exc) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return {'message': 'Data successfully saved'}
########################################################
# Organisme by id view
########################################################
@view_config(route_name='organisme_by_id', request_method='GET', renderer='json')
def organisme_by_id_view(request):
    """Return the Organisme matching the ``id`` URL segment."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        record = session.query(models.Organisme).filter(
            models.Organisme.id == request.matchdict['id']).first()
        if record is None:
            raise Exception(id_not_found_exception)
    except Exception as exc:
        log.error(str(exc))
        message = id_not_found_exception if str(exc) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
    return record
########################################################
# Suggestion by liste name view
########################################################
@view_config(route_name='suggestion_by_liste_name', request_method='GET', renderer='json')
def suggestion_by_liste_name_view(request):
    """Return the suggestion values for the liste named in the URL ``id`` segment."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        items = session.query(models.Suggestion).filter(
            models.Suggestion.liste == request.matchdict['id']).all()
        if not items:
            raise Exception(id_not_found_exception)
        return [item.valeur for item in items]
    except Exception as exc:
        log.error(str(exc))
        message = id_not_found_exception if str(exc) == id_not_found_exception else general_exception
        return {'error': 'true', 'code': 500, 'message': message}
########################################################
# Organismes view
########################################################
@view_config(route_name='organismes', request_method='GET', renderer='json')
@view_config(route_name='organismes_slash', request_method='GET', renderer='json')
def organismes_view(request):
    """Return every Organisme row as a JSON list."""
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        records = session.query(models.Organisme).all()
    except Exception as exc:
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return records
########################################################
# Add organisme view
########################################################
@view_config(route_name='organismes', request_method='POST', renderer='json')
@view_config(route_name='organismes_slash', request_method='POST', renderer='json')
def add_organisme_view(request):
    """Create an organisme unless one with the same name, phone, fax or email exists.

    Pass ``forcerAjout=true`` to skip the duplicate check.
    """
    try:
        session = request.dbsession
        session.execute('set search_path to ' + request.registry.settings['schema_name'])
        params = request.params
        # Read params (None when absent)
        nom = params.get('nom')
        adresse = params.get('adresse')
        localite = params.get('localite')
        telephone = params.get('telephone')
        fax = params.get('fax')
        courriel = params.get('courriel')
        # Check if force ajout
        forcerAjout = params.get('forcerAjout')
        if forcerAjout == 'true':
            forcerAjout = True
        elif forcerAjout == 'false':
            forcerAjout = False
        # Check if organisme already exists (NULL columns never count as a match)
        if not forcerAjout:
            duplicates = session.query(models.Organisme).filter(or_(
                func.lower(models.Organisme.nom) == func.lower(nom),
                and_(models.Organisme.telephone == telephone, models.Organisme.telephone != None),
                and_(models.Organisme.fax == fax, models.Organisme.fax != None),
                and_(func.lower(models.Organisme.courriel) == func.lower(courriel),
                     models.Organisme.courriel != None))).all()
            if len(duplicates) > 0:
                return {'error': 'true', 'code': 500, 'message': 'Organisme already exists'}
        with transaction.manager:
            session.add(models.Organisme(
                nom=nom,
                adresse=adresse,
                localite=localite,
                telephone=telephone,
                fax=fax,
                courriel=courriel))
            transaction.commit()
    except Exception as exc:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(exc))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Update organisme view
########################################################
@view_config(route_name='organismes', request_method='PUT', renderer='json')
@view_config(route_name='organismes_slash', request_method='PUT', renderer='json')
def update_organisme_view(request):
    """Update the organisme identified by the ``id`` request param.

    FIX: the except handler called ``tm.abort()``, but ``tm`` is only bound
    inside the ``with transaction.manager as tm`` block — any exception
    raised before entering it produced a NameError that masked the real
    error. Use the module-level ``transaction.abort()`` like the other
    update views in this file.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        # Read params (None when absent; an empty id is treated as missing)
        id = request.params.get('id')
        if id == '':
            id = None
        nom = request.params.get('nom')
        adresse = request.params.get('adresse')
        localite = request.params.get('localite')
        telephone = request.params.get('telephone')
        fax = request.params.get('fax')
        courriel = request.params.get('courriel')
        query = request.dbsession.query(models.Organisme).filter(models.Organisme.id == id)
        with transaction.manager:
            if query.count() > 0:
                record = query.first()
                record.nom = nom
                record.adresse = adresse
                record.localite = localite
                record.telephone = telephone
                record.fax = fax
                record.courriel = courriel
                transaction.commit()
            else:
                raise Exception(id_not_found_exception)
    except Exception as e:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Delete organisme by id view
########################################################
@view_config(route_name='organisme_by_id', request_method='DELETE', renderer='json')
def delete_organisme_by_id_view(request):
    """Delete one organisme identified by the id in the route."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        organisme_id = request.matchdict['id']
        organisme = request.dbsession.query(models.Organisme).filter(
            models.Organisme.id == organisme_id).first()
        if not organisme:
            raise Exception(id_not_found_exception)
        with transaction.manager:
            request.dbsession.delete(organisme)
            # Commit transaction
            transaction.commit()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Axes routiers view
########################################################
@view_config(route_name='axes_routiers', request_method='GET', renderer='json')
@view_config(route_name='axes_routiers_slash', request_method='GET', renderer='json')
def axes_routiers_view(request):
    """Return every Axe (road axis) record as JSON."""
    try:
        settings = request.registry.settings
        # Scope the session to the configured schema before querying
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        return request.dbsession.query(models.Axe).all()
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 500, 'message': general_exception}
########################################################
# PR par axe routier view
########################################################
@view_config(route_name='pr_par_axe_routier', request_method='GET', renderer='json')
@view_config(route_name='pr_par_axe_routier_slash', request_method='GET', renderer='json')
def pr_par_axe_routier_view(request):
    """Return the formatted Secteur records (PR) belonging to one road axis."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        if 'id' in request.params:
            axe_nom = request.params['id']
            secteurs = request.dbsession.query(models.Secteur).filter(
                models.Secteur.axe_nom_complet == axe_nom).all()
            if secteurs:
                return [secteur.format() for secteur in secteurs]
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    # No 'id' param, or no matching sector
    return []
########################################################
# Plans types chantiers view
########################################################
@view_config(route_name='plans_types_fouille', request_method='GET', renderer='json')
@view_config(route_name='plans_types_fouille_slash', request_method='GET', renderer='json')
def plans_types_fouille_view(request):
    """Return every PlanTypeFouille record."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        plans = request.dbsession.query(models.PlanTypeFouille).all()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return plans
########################################################
# Evenements XML
########################################################
@view_config(route_name='evenements_xml', request_method='GET', renderer='json')
@view_config(route_name='evenements_xml_slash', request_method='GET', renderer='json')
def evenements_xml_view(request):
    """Import pending evenement XML files; report successes and failures."""
    successful_files = []
    failed_files = []
    try:
        for one_file in EvenementXML.list_folder_files(request):
            parsed = EvenementXML.xml_to_json(one_file)
            if not parsed:
                continue
            if EvenementXML.add_file_data(parsed):
                successful_files.append(one_file)
                # The source file is only removed once its data is stored
                EvenementXML.remove_file(request, one_file)
            else:
                failed_files.append(one_file)
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return {'successful_files' : successful_files, 'failed_files' : failed_files}
########################################################
# Login
########################################################
@view_config(route_name='login', request_method='POST', renderer='json')
@view_config(route_name='login_slash', request_method='POST', renderer='json')
def login_view(request):
    """Authenticate a user: check the DB contact, then delegate to LDAP.

    Returns the payload built by LDAPQuery.do_login, or a 403 JSON error
    when the user is unknown or authentication fails.
    """
    response = None
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        login = request.params.get('login')
        password = request.params.get('password')
        # The user must exist in the application DB before LDAP is consulted
        query = request.dbsession.query(models.Contact)
        contact = query.filter(func.lower(models.Contact.login) == func.lower(login)).first()
        if not contact:
            raise Exception(user_not_found_exception)
        # Collect the entites linked to the contact for the login response
        entites = []
        for lce, e in request.dbsession.query(models.LienContactEntite, models.Entite).filter(
                models.LienContactEntite.id_contact == contact.id).filter(
                models.Entite.id == models.LienContactEntite.id_entite).all():
            entites.append(e.format())
        response = LDAPQuery.do_login(request, login, password, contact, entites)
    except Exception as error:
        log.error(str(error))
        request.response.status = 403
        return {'error': 'true', 'code': 403, 'message': str(error)}
    return response
########################################################
# Logout
########################################################
@view_config(route_name='logout', request_method='GET', renderer='json')
def logout_view(request):
    """Terminate the LDAP session of the current user."""
    try:
        return LDAPQuery.do_logout(request)
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 403, 'message': str(error)}
########################################################
# Logged user
########################################################
@view_config(route_name='logged_user', request_method='GET', renderer='json')
@view_config(route_name='logged_user_slash', request_method='GET', renderer='json')
def logged_user_view(request):
    """Return the currently authenticated contact, with its linked entites.

    Requires the 'auth_tkt' cookie; raises HTTPForbidden otherwise.
    """
    contact_json = None
    try:
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        current_user_id = Utils.get_connected_user_id(request)
        current_user_id = int(current_user_id) if current_user_id else None
        if not current_user_id:
            raise Exception(user_not_found_exception)
        # Check if user exists in DB
        contact = request.dbsession.query(models.Contact).filter(
            models.Contact.id == current_user_id).first()
        if not contact:
            raise Exception(user_not_found_exception)
        # Entites linked to the contact
        entites = [e.format() for lce, e in request.dbsession.query(
            models.LienContactEntite, models.Entite).filter(
            models.LienContactEntite.id_contact == contact.id).filter(
            models.Entite.id == models.LienContactEntite.id_entite).all()]
        contact_json = contact.format()
        contact_json['entites'] = entites
    except HTTPForbidden:
        # Re-raise the original exception instead of constructing a new one,
        # so the traceback and any response details are preserved
        raise
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 403, 'message': str(error)}
    return contact_json
########################################################
# Entites
########################################################
@view_config(route_name='entites', request_method='GET', renderer='json')
@view_config(route_name='entites_slash', request_method='GET', renderer='json')
def entites_view(request):
    """List the entites linked to the connected user."""
    entites = []
    try:
        if not request.cookies.get('auth_tkt', default=None):
            raise HTTPForbidden()
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        user_id = Utils.get_connected_user_id(request)
        user_id = int(user_id) if user_id else None
        if user_id:
            liens = request.dbsession.query(models.LienContactEntite, models.Entite).filter(
                models.LienContactEntite.id_contact == user_id).filter(
                models.Entite.id == models.LienContactEntite.id_entite).all()
            entites = [entite.format() for _, entite in liens]
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 403, 'message': str(error)}
    return entites
########################################################
# Get nouveaux contacts AD
########################################################
@view_config(route_name='nouveaux_contacts_ad', request_method='GET', renderer='json')
@view_config(route_name='nouveaux_contacts_ad_slash', request_method='GET', renderer='json')
def get_nouveaux_contacts_ad_view(request):
    """Return AD users from the entite groups that do not yet exist in the DB.

    Each returned contact dict is enriched with its 'entites' and 'roles'
    AD groups, split by the configured group-id prefixes.
    Raises HTTPForbidden when the 'auth_tkt' cookie is missing.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        # LDAP attribute names and group-id prefixes from configuration
        group_id_attribute = settings['ldap_group_attribute_id']
        group_name_attribute = settings['ldap_group_attribute_name']
        ldap_entite_groups_prefix = settings['ldap_entite_groups_prefix']
        ldap_fonction_groups_prefix = settings['ldap_fonction_groups_prefix']
        login_attr = settings['ldap_user_attribute_login']
        if not auth_tkt:
            raise HTTPForbidden()
        # Logins from AD
        contacts_ad_json = LDAPQuery.get_users_belonging_to_group_entites(request)
        # Logins from DB, upper-cased for case-insensitive comparison
        contacts_bd_logins = []
        contacts_bd_logins_query = request.dbsession.query(models.Contact).distinct(models.Contact.login).filter(models.Contact.login.isnot(None)).all()
        for c in contacts_bd_logins_query:
            contacts_bd_logins.append(c.login.upper())
        result = []
        for one_contact_ad_json in contacts_ad_json:
            if one_contact_ad_json and login_attr in one_contact_ad_json:
                # Keep only AD users whose login is absent from the DB
                if one_contact_ad_json[login_attr].upper() not in contacts_bd_logins:
                    groups = LDAPQuery.get_user_groups_by_dn(request, one_contact_ad_json['dn'])
                    # Partition the user's AD groups by prefix: entites vs roles
                    entites = [{'id': x[group_id_attribute], 'name': x[group_name_attribute]} for x in groups if
                               group_id_attribute in x and x[group_id_attribute].startswith(ldap_entite_groups_prefix)]
                    roles = [{'id': x[group_id_attribute], 'name': x[group_name_attribute]} for x in groups if
                             group_id_attribute in x and x[group_id_attribute].startswith(ldap_fonction_groups_prefix)]
                    one_contact_ad_json['entites'] = entites
                    one_contact_ad_json['roles'] = roles
                    result.append(one_contact_ad_json)
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 403, 'message': str(error)}
    return result
########################################################
# Add nouveaux contacts AD
########################################################
@view_config(route_name='nouveaux_contacts_ad', request_method='POST', renderer='json')
@view_config(route_name='nouveaux_contacts_ad_slash', request_method='POST', renderer='json')
def add_nouveaux_contacts_ad_view(request):
    """Import AD contacts (JSON in the 'contacts' param) into the DB.

    For each contact: creates the Contact row, links it to its entites
    (creating missing Entite rows on the fly), and records its fonction
    groups. Each contact is committed in its own transaction.
    Raises HTTPForbidden when the 'auth_tkt' cookie is missing.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        login_attr = settings['ldap_user_attribute_login']
        if not auth_tkt:
            raise HTTPForbidden()
        contacts_ad_json = None
        if 'contacts' in request.params:
            contacts_ad_json = request.params['contacts']
        if contacts_ad_json is None:
            raise Exception('Parameter contacts is empty')
        # Refresh the AD groups before importing
        is_groupes_ad_mis_a_jour = Utils.mise_a_jour_groupes_ad(request)
        if is_groupes_ad_mis_a_jour:
            contacts_ad_json = json.loads(contacts_ad_json)
            # Map AD group name -> entite id for every known entite
            entites = {}
            entites_query = request.dbsession.query(models.Entite).all()
            for e in entites_query:
                entites[e.nom_groupe_ad] = e.id
            for one_contact_ad_json in contacts_ad_json:
                # One transaction per imported contact
                with transaction.manager as tm:
                    # Add contact to DB
                    contact_model = models.Contact(
                        id_organisme=one_contact_ad_json['id_organisme'],
                        login=one_contact_ad_json[settings['ldap_user_attribute_login']],
                        nom=one_contact_ad_json[settings['ldap_user_attribute_lastname']],
                        prenom=one_contact_ad_json[settings['ldap_user_attribute_firstname']],
                        telephone=one_contact_ad_json[settings['ldap_user_attribute_telephone']],
                        #mobile=one_contact_ad_json[settings['ldap_user_attribute_mobile']],
                        courriel=one_contact_ad_json[settings['ldap_user_attribute_mail']])
                    request.dbsession.add(contact_model)
                    # Flush so the generated contact id is available below
                    request.dbsession.flush()
                    max_contact_id = contact_model.id
                    # Add entites groups
                    groupes_entites = one_contact_ad_json['entites'] if 'entites' in one_contact_ad_json else None
                    if groupes_entites is not None and len(groupes_entites) > 0:
                        for one_contact_ldap_group_item in groupes_entites:
                            one_contact_ldap_group_id = one_contact_ldap_group_item['id']
                            one_contact_ldap_group_name = one_contact_ldap_group_item['name']
                            #Entite group
                            if one_contact_ldap_group_id.startswith(settings['ldap_entite_groups_prefix']):
                                id_entite = entites[one_contact_ldap_group_id] if entites and one_contact_ldap_group_id in entites else None
                                # If the entite does not exist yet, create it
                                if id_entite is None:
                                    entite_model = models.Entite(
                                        nom=one_contact_ldap_group_name,
                                        id_responsable=settings['id_responsable_entite'],
                                        nom_groupe_ad=one_contact_ldap_group_id
                                    )
                                    request.dbsession.add(entite_model)
                                    request.dbsession.flush()
                                    id_entite = entite_model.id
                                # Link the new contact to the entite
                                if id_entite is not None:
                                    lien_entite_contact_model = models.LienContactEntite(
                                        id_contact=max_contact_id,
                                        id_entite=id_entite
                                    )
                                    request.dbsession.add(lien_entite_contact_model)
                    # Add Fonction group
                    groupes_fonctions = one_contact_ad_json['roles'] if 'roles' in one_contact_ad_json else None
                    if groupes_fonctions is not None and len(groupes_fonctions) > 0:
                        for one_contact_ldap_group_item in groupes_fonctions:
                            one_contact_ldap_group_id = one_contact_ldap_group_item['id']
                            fonction_contact_model = models.FonctionContact(
                                id_contact=max_contact_id,
                                fonction=one_contact_ldap_group_id
                            )
                            request.dbsession.add(fonction_contact_model)
                    transaction.commit()
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as e:
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Mise a jour des groupes AD
########################################################
@view_config(route_name='mise_a_jours_groupes_ad', request_method='GET', renderer='json')
@view_config(route_name='mise_a_jours_groupes_ad_slash', request_method='GET', renderer='json')
def mise_a_jours_groupes_ad_view(request):
    """Synchronise the AD groups into the DB via Utils.mise_a_jour_groupes_ad."""
    result = {'message': 'AD groups updated'}
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        # NOTE(review): the auth_tkt check below is deliberately disabled (bare
        # string, no effect at runtime) — confirm whether this endpoint is
        # really meant to be callable without authentication.
        """
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        """
        is_groupes_ad_mis_a_jour = Utils.mise_a_jour_groupes_ad(request)
        if not is_groupes_ad_mis_a_jour:
            raise Exception('An error occured while updating AD groups')
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as e:
        # Roll back any partial changes made by the group update helper
        transaction.abort()
        request.dbsession.rollback()
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return result
########################################################
# Autorisations accordees
########################################################
@view_config(route_name='autorisations_accordees', request_method='GET', renderer='json')
@view_config(route_name='autorisations_accordees_slash', request_method='GET', renderer='json')
def autorisations_accordees_view(request):
    """List the delegations granted by the connected user for one entite."""
    result = []
    entite_err_msg = 'idEntite parameter is empty'
    try:
        if not request.cookies.get('auth_tkt', default=None):
            raise HTTPForbidden()
        id_entite = request.params['idEntite'] if 'idEntite' in request.params else None
        if id_entite is None:
            raise Exception(entite_err_msg)
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        user_id = Utils.get_connected_user_id(request)
        user_id = int(user_id) if user_id else None
        if user_id:
            # Join delegations to the delegataire contact, its organisme and
            # its link to the requested entite
            rows = request.dbsession.query(
                models.Delegation, models.Contact, models.Organisme,
                models.LienContactEntite).filter(
                models.Delegation.id_delegant == user_id).filter(
                models.Contact.id == models.Delegation.id_delegataire).filter(
                models.Contact.id_organisme == models.Organisme.id).filter(
                models.LienContactEntite.id_entite == id_entite).filter(
                models.Delegation.id_delegataire == models.LienContactEntite.id_contact).all()
            result = [delegation.format(contact.nom, contact.prenom, organisme.nom)
                      for delegation, contact, organisme, _ in rows]
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 500,
                'message': entite_err_msg if str(error) == entite_err_msg else general_exception}
    return result
#######################################################
# Autorisations recues
########################################################
@view_config(route_name='autorisations_recues', request_method='GET', renderer='json')
@view_config(route_name='autorisations_recues_slash', request_method='GET', renderer='json')
def autorisations_recues_view(request):
    """List the delegations received by the connected user."""
    result = []
    try:
        if not request.cookies.get('auth_tkt', default=None):
            raise HTTPForbidden()
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        user_id = Utils.get_connected_user_id(request)
        user_id = int(user_id) if user_id else None
        if user_id:
            # Join delegations to the delegant contact and its organisme
            rows = request.dbsession.query(models.Delegation, models.Contact,
                                           models.Organisme).filter(
                models.Delegation.id_delegataire == user_id).filter(
                models.Contact.id == models.Delegation.id_delegant).filter(
                models.Contact.id_organisme == models.Organisme.id).all()
            result = [delegation.format(contact.nom, contact.prenom, organisme.nom)
                      for delegation, contact, organisme in rows]
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 403, 'message': str(error)}
    return result
########################################################
# Add autorisation
########################################################
@view_config(route_name='autorisations', request_method='POST', renderer='json')
@view_config(route_name='autorisations_slash', request_method='POST', renderer='json')
def add_autorisations_view(request):
    """Create a Delegation granted by the connected user.

    Expects 'idDelegataire' plus the autorisationLecture/Modification/
    Suppression flags ('true'/'false') in the request params.
    """
    def _tristate(name):
        # Map 'true'/'false' request strings to booleans; missing or any
        # other value yields None (matches the original parsing)
        if name not in request.params:
            return None
        value = request.params[name]
        if value == 'true':
            return True
        if value == 'false':
            return False
        return None

    try:
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        current_user_id = Utils.get_connected_user_id(request)
        current_user_id = int(current_user_id) if current_user_id else None
        if current_user_id:
            with transaction.manager:
                idDelegataire = request.params['idDelegataire'] if 'idDelegataire' in request.params else None
                model = models.Delegation(
                    id_delegant=current_user_id,
                    id_delegataire=idDelegataire,
                    autorisation_lecture=_tristate('autorisationLecture'),
                    autorisation_modification=_tristate('autorisationModification'),
                    autorisation_suppression=_tristate('autorisationSuppression'))
                request.dbsession.add(model)
                transaction.commit()
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as e:
        transaction.abort()
        request.dbsession.rollback()
        # Log the failure — this was the only handler in the file that
        # previously swallowed the exception without logging it
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Update autorisation
########################################################
@view_config(route_name='autorisations', request_method='PUT', renderer='json')
@view_config(route_name='autorisations_slash', request_method='PUT', renderer='json')
def update_autorisations_view(request):
    """Update an existing Delegation identified by the 'idDelegation' param.

    Only the fields whose params are present in the request are modified.
    Raises id_not_found_exception when the delegation does not exist.
    """
    try:
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        current_user_id = Utils.get_connected_user_id(request)
        current_user_id = int(current_user_id) if current_user_id else None
        if current_user_id:
            # Params
            idDelegation = None
            idDelegataire = None
            autorisationLecture = None
            autorisationModification = None
            autorisationSuppression = None
            if 'idDelegation' in request.params:
                idDelegation = request.params['idDelegation']
            delegation_record = request.dbsession.query(models.Delegation).filter(models.Delegation.id == idDelegation).first()
            if delegation_record == None :
                raise Exception(id_not_found_exception)
            with transaction.manager:
                if 'idDelegataire' in request.params:
                    idDelegataire = request.params['idDelegataire']
                    delegation_record.id_delegataire = idDelegataire
                # For each flag: 'true'/'false' strings map to booleans; any
                # other supplied value resets the stored flag to None
                if 'autorisationLecture' in request.params:
                    autorisationLecture = request.params['autorisationLecture']
                    if autorisationLecture == 'true':
                        autorisationLecture = True
                    elif autorisationLecture == 'false':
                        autorisationLecture = False
                    else:
                        autorisationLecture = None
                    delegation_record.autorisation_lecture = autorisationLecture
                if 'autorisationModification' in request.params:
                    autorisationModification = request.params['autorisationModification']
                    if autorisationModification == 'true':
                        autorisationModification = True
                    elif autorisationModification == 'false':
                        autorisationModification = False
                    else:
                        autorisationModification = None
                    delegation_record.autorisation_modification = autorisationModification
                if 'autorisationSuppression' in request.params:
                    autorisationSuppression = request.params['autorisationSuppression']
                    if autorisationSuppression == 'true':
                        autorisationSuppression = True
                    elif autorisationSuppression == 'false':
                        autorisationSuppression = False
                    else:
                        autorisationSuppression = None
                    delegation_record.autorisation_suppression = autorisationSuppression
                transaction.commit()
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 403, 'message': str(error)}
    return {'message': 'Data successfully saved'}
########################################################
# Delete delegation by id
########################################################
@view_config(route_name='autorisation_by_id', request_method='DELETE', renderer='json')
def delete_autorisation_by_id_view(request):
    """Delete one delegation identified by the id in the route."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        if not request.cookies.get('auth_tkt', default=None):
            raise HTTPForbidden()
        delegation_id = request.matchdict['id']
        delegation = request.dbsession.query(models.Delegation).filter(
            models.Delegation.id == delegation_id).first()
        if not delegation:
            raise Exception(id_not_found_exception)
        with transaction.manager:
            request.dbsession.delete(delegation)
            # Commit transaction
            transaction.commit()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500,
                'message': id_not_found_exception if str(e) == id_not_found_exception else general_exception}
    return {'message': 'Data successfully saved'}
########################################################
# Autorisations fonctions
########################################################
@view_config(route_name='autorisations_fonctions', request_method='GET', renderer='json')
@view_config(route_name='autorisations_fonctions_slash', request_method='GET', renderer='json')
def autorisations_fonctions_view(request):
    """Return the AutorisationFonction row of the connected user (or None)."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        return request.dbsession.query(models.AutorisationFonction).filter(
            models.AutorisationFonction.id_utilisateur == current_user_id).first()
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
########################################################
# LDAP users
########################################################
@view_config(route_name='ldap_users', request_method='GET', renderer='json')
@view_config(route_name='ldap_users_slash', request_method='GET', renderer='json')
def ldap_users_view(request):
    """Placeholder endpoint for LDAP users.

    NOTE(review): this view is a stub — it only reads the settings and
    returns the literal string "okkkk". Confirm whether it should be
    implemented or removed.
    """
    try:
        settings = request.registry.settings
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 403, 'message': str(error)}
    return "okkkk"
########################################################
# Localites npa
########################################################
@view_config(route_name='localites_npa', request_method='GET', renderer='json')
@view_config(route_name='localites_npa_slash', request_method='GET', renderer='json')
def localites_npa_view(request):
    """Return every Localite (NPA) record."""
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        localites = request.dbsession.query(models.Localite).all()
    except HTTPForbidden as e:
        raise HTTPForbidden()
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 403, 'message': str(error)}
    return localites
########################################################
# Localités view
########################################################
@view_config(route_name='localites', request_method='GET', renderer='json')
@view_config(route_name='localites_slash', request_method='GET', renderer='json')
def localites_view(request):
    """Query the localites WFS layer and return its formatted features."""
    try:
        settings = request.registry.settings
        # WFS configuration for the localites layer
        result = WFSQuery.do_query_wfs(
            request,
            settings['localites_typename'],
            settings['localites_propertyname'],
            None,
            settings['localites_return_template'],
            None)
        if not result:
            return []
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return result
########################################################
# Cadastre view
########################################################
@view_config(route_name='cadastre', request_method='GET', renderer='json')
@view_config(route_name='cadastre_slash', request_method='GET', renderer='json')
def cadastre_view(request):
    """Query the cadastre WFS layer and return its formatted features."""
    try:
        settings = request.registry.settings
        # WFS configuration for the cadastre layer
        result = WFSQuery.do_query_wfs(
            request,
            settings['cadastre_typename'],
            settings['cadastre_propertyname'],
            None,
            settings['cadastre_return_template'],
            None)
        if not result:
            return []
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return result
########################################################
# Communes
########################################################
@view_config(route_name='communes', request_method='GET', renderer='json')
@view_config(route_name='communes_slash', request_method='GET', renderer='json')
def communes_view(request):
    """Query the communes WFS layer and return its formatted features."""
    try:
        settings = request.registry.settings
        # WFS configuration for the communes layer
        result = WFSQuery.do_query_wfs(
            request,
            settings['communes_typename'],
            settings['communes_propertyname'],
            None,
            settings['communes_return_template'],
            None)
        if not result:
            return []
    except Exception as error:
        log.error(str(error))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return result
########################################################
# Get geometry from reperage
########################################################
@view_config(route_name='geometry_reperage', request_method='GET', renderer='json')
@view_config(route_name='geometry_reperage_slash', request_method='GET', renderer='json')
def geometry_reperage_view(request):
    """Resolve a linear-referencing (reperage) description into GeoJSON.

    Forwards the f_* parameters to the external vmdeport web service, then
    asks PostGIS to force the returned geometry to 2D and serialize it as
    GeoJSON. All ten f_* parameters are mandatory.
    """
    try:
        settings = request.registry.settings
        vmdeport_ws_url = settings['vmdeport_ws_url']
        # Mandatory parameters for the vmdeport service, in the order the
        # query string is built
        param_names = ('f_prop', 'f_axe', 'f_sens', 'f_pr_d', 'f_pr_f',
                       'f_dist_d', 'f_dist_f', 'f_ecart_d', 'f_ecart_f', 'f_usaneg')
        values = {name: request.params[name] if name in request.params else None
                  for name in param_names}
        if not all(values[name] for name in param_names):
            # Was misspelled 'Parmaeter(s)' before
            raise Exception('Parameter(s) empty or not valid')
        params = '&'.join(name + '=' + values[name] for name in param_names)
        response = requests.get(vmdeport_ws_url + "?" + params)
        response_code = response.status_code
        if(int(response_code) != 200):
            # Propagate the remote service error untouched
            return {'error': 'true', 'sitn_error': 'true', 'code': response_code, 'message': response.text}
        result = request.dbsession.query(func.public.ST_AsGeoJSON(func.public.ST_Force2D(response.json()))).all()
        if result is not None:
            # NOTE(review): string-mangling the SQLAlchemy row tuple into JSON
            # is fragile but kept for compatibility with existing consumers
            result = str(result).replace("('", "").replace("',)", "")
            result = json.loads(result)
    except Exception as error:
        log.error(str(error))
        # 'false' was previously the bare name `false`, which raised a
        # NameError whenever this handler fired
        return {'error': 'true', 'sitn_error': 'false', 'code': 500, 'message': general_exception}
    return result
########################################################
# Search evenement
########################################################
@view_config(route_name='search_evenements', request_method='POST', renderer='json')
@view_config(route_name='search_evenements_slash', request_method='POST', renderer='json')
def search_evenements_view(request):
    """Search evenements matching the POSTed filter parameters.

    Requires an ``auth_tkt`` cookie (403 otherwise).  Results are always
    restricted to the connected user and, when supplied, to ``idEntite``.
    When no explicit search criterion is given the result set is capped at
    the configured ``search_limit``.

    Returns a list of formatted evenements, or an error dict on failure.
    """
    try:
        view = models.SearchEvenementView
        params = request.params
        conditions = []
        if 'idEntite' in params:
            conditions.append(view.id_entite == params['idEntite'])
        # Check authorization
        auth_tkt = request.cookies.get('auth_tkt', default=None)
        if not auth_tkt:
            raise HTTPForbidden()
        current_user_id = Utils.get_connected_user_id(request)
        current_user_id = int(current_user_id) if current_user_id else None
        conditions.append(view.id_utilisateur == current_user_id)
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        search_limit = int(settings['search_limit'])

        def given(name):
            # Parameter present and non-empty
            return name in params and params[name] != ""

        def like_lower(column, name):
            # Case-insensitive substring match on `column`
            return func.lower(column).like('%' + func.lower(params[name]) + '%')

        # Read params: a dossier number is selective enough on its own;
        # otherwise combine every other provided criterion.
        if given('numeroDossier'):
            conditions.append(like_lower(view.numero_dossier, 'numeroDossier'))
        else:
            if given('type'):
                conditions.append(view.type == params['type'])
            if given('prevision'):
                conditions.append(view.prevision == params['prevision'])
            if given('libelle'):
                conditions.append(like_lower(view.libelle, 'libelle'))
            # Date criteria select overlapping periods: the evenement must
            # end after dateDebut and start before dateFin.
            if given('dateDebut'):
                conditions.append(func.DATE(view.date_fin) >= func.DATE(params['dateDebut']))
            if given('dateFin'):
                conditions.append(func.DATE(view.date_debut) <= func.DATE(params['dateFin']))
            if given('division'):
                conditions.append(like_lower(view.division, 'division'))
            if given('idRequerant'):
                conditions.append(view.id_requerant == params['idRequerant'])
            if given('idResponsable'):
                conditions.append(view.id_responsable == params['idResponsable'])
            if given('axe'):
                conditions.append(view.axe == params['axe'])
            if given('prDebut'):
                conditions.append(view.pr_debut >= params['prDebut'])
            if given('prFin'):
                conditions.append(view.pr_fin <= params['prFin'])
            if given('prDebutSegSeq'):
                conditions.append(view.pr_debut_seg_seq >= params['prDebutSegSeq'])
            if given('prDebutSecSeq'):
                conditions.append(view.pr_debut_sec_seq >= params['prDebutSecSeq'])
            if given('prFinSegSeq'):
                conditions.append(view.pr_fin_seg_seq <= params['prFinSegSeq'])
            if given('prFinSecSeq'):
                conditions.append(view.pr_fin_sec_seq <= params['prFinSecSeq'])
            if given('ajoutePar'):
                conditions.append(view.id_utilisateur_ajout == params['ajoutePar'])
            if given('prTouche'):
                conditions.append(view.pr_touches == params['prTouche'])
            if given('compteurTouche'):
                # 'true'/'false' map to booleans, anything else to NULL
                flag = {'true': True, 'false': False}.get(params['compteurTouche'])
                conditions.append(view.compteur_touche == flag)
        query = request.dbsession.query(view).order_by(view.id.desc())
        if len(conditions) > 2:
            # At least one explicit criterion beyond the implicit
            # user/entity filters: return every match.
            result = query.filter(*conditions).all()
        else:
            # No explicit criteria: cap the result with a SQL LIMIT instead
            # of fetching every row and slicing in Python.
            result = query.filter(*conditions).limit(search_limit).all()
        formattedResult = [evenement.format() for evenement in result if evenement is not None]
    except HTTPForbidden:
        # Re-raise the original so the registered 403 handler answers.
        raise
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return formattedResult
########################################################
# Search perturbations
########################################################
@view_config(route_name='search_perturbations', request_method='POST', renderer='json')
@view_config(route_name='search_perturbations_slash', request_method='POST', renderer='json')
def search_perturbations_view(request):
try:
conditions = []
id_entite = None
if 'idEntite' in request.params:
id_entite = request.params['idEntite']
conditions.append(models.SearchPerturbationView.id_entite == id_entite)
# Check authorization
auth_tkt = request.cookies.get('auth_tkt', default=None)
if not auth_tkt:
raise HTTPForbidden()
current_user_id = Utils.get_connected_user_id(request)
current_user_id = int(current_user_id) if current_user_id else None
conditions.append(models.SearchPerturbationView.id_utilisateur == current_user_id)
settings = request.registry.settings
request.dbsession.execute('set search_path to ' + settings['schema_name'])
search_limit = int(settings['search_limit'])
# Read params
if ('numeroDossierEvenement' in request.params and request.params['numeroDossierEvenement'] != ""):
conditions.append(func.lower(models.SearchPerturbationView.numero_dossier_evenement).like(
'%' + func.lower(request.params['numeroDossierEvenement']) + '%'))
else:
if 'type' in request.params and request.params['type'] != "":
conditions.append(models.SearchPerturbationView.type == request.params['type'])
if ('axe' in request.params and request.params['axe'] != ""):
conditions.append(models.SearchPerturbationView.axe == request.params['axe'])
if ('prDebut' in request.params and request.params['prDebut'] != ""):
conditions.append(models.SearchPerturbationView.pr_debut == request.params['prDebut'])
if ('prFin' in request.params and request.params['prFin'] != ""):
conditions.append(models.SearchPerturbationView.pr_fin == request.params['prFin'])
if ('prDebutSegSeq' in request.params and request.params['prDebutSegSeq'] != ""):
conditions.append(models.SearchPerturbationView.pr_debut_seg_seq >= request.params['prDebutSegSeq'])
if ('prDebutSecSeq' in request.params and request.params['prDebutSecSeq'] != ""):
conditions.append(models.SearchPerturbationView.pr_debut_sec_seq >= request.params['prDebutSecSeq'])
if ('prFinSegSeq' in request.params and request.params['prFinSegSeq'] != ""):
conditions.append(models.SearchPerturbationView.pr_fin_seg_seq <= request.params['prFinSegSeq'])
if ('prFinSecSeq' in request.params and request.params['prFinSecSeq'] != ""):
conditions.append(models.SearchPerturbationView.pr_fin_sec_seq <= request.params['prFinSecSeq'])
if 'etat' in request.params and request.params['etat'] != "":
conditions.append(models.SearchPerturbationView.etat == request.params['etat'])
if 'urgence' in request.params and request.params['urgence'] != "":
conditions.append(models.SearchPerturbationView.urgence == request.params['urgence'])
if ('prDebut' in request.params and request.params['prDebut'] != ""):
conditions.append(models.SearchPerturbationView.pr_debut == request.params['prDebut'])
if ('prFin' in request.params and request.params['prFin'] != ""):
conditions.append(models.SearchPerturbationView.pr_fin == request.params['prFin'])
if ('description' in request.params and request.params['description'] != ""):
conditions.append(func.lower(models.SearchPerturbationView.description).like('%' + func.lower(request.params['description']) + '%'))
if 'typeEvenement' in request.params and request.params['typeEvenement'] != "":
conditions.append(models.SearchPerturbationView.type_evenement == request.params['typeEvenement'])
if ('dateDebut' in request.params and request.params['dateDebut'] != ""):
conditions.append(
func.DATE(models.SearchPerturbationView.date_fin) >= func.DATE(request.params['dateDebut']))
if ('dateFin' in request.params and request.params['dateFin'] != ""):
conditions.append(
func.DATE(models.SearchPerturbationView.date_debut) <= func.DATE(request.params['dateFin']))
# if ('comptage' in request.params and request.params['comptage'] != ""):
# conditions.append(models.SearchPerturbationView.comptage == request.params['comptage'])
if ('ajoutePar' in request.params and request.params['ajoutePar'] != ""):
conditions.append(models.SearchPerturbationView.id_utilisateur_ajout == request.params['ajoutePar'])
if ('compteurTouche' in request.params and request.params['compteurTouche'] != ""):
compteurTouche = request.params['compteurTouche']
if compteurTouche == 'true':
compteurTouche = True
elif compteurTouche == 'false':
compteurTouche = False
else:
compteurTouche = None
conditions.append(models.SearchPerturbationView.compteur_touche == compteurTouche)
query = request.dbsession.query(models.SearchPerturbationView).order_by(models.SearchPerturbationView.id.desc())
if len(conditions) > 2:
result = query.filter(*conditions).all()
else:
result = query.filter(*conditions).all()[:search_limit]
formattedResult = []
for perturbation in result:
if perturbation != None:
formattedResult.append(perturbation.format())
except HTTPForbidden as e:
raise HTTPForbidden()
except Exception as e:
log.error(str(e))
return {'error': 'true', 'code': 500, 'message': general_exception}
return formattedResult
########################################################
# Conflits perturbation by id view
########################################################
@view_config(route_name='conflits_perturabations_by_id', request_method='GET', renderer='json')
def conflits_perturabations_by_id_view(request):
    """Return the conflicts of one evenement as parsed JSON.

    Calls the DB function ``perturbtrafic.pt_conflits_by_evenement_id_json``
    and converts the stringified result rows back into a JSON value.
    Returns ``None`` (rendered as JSON null) when the query yields no rows,
    or an error dict on failure.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        # NOTE(review): `id` shadows the builtin, and it comes straight from
        # the URL yet is interpolated into the SQL text below -- SQL
        # injection risk; should be passed as a bound parameter instead.
        id = request.matchdict['id']
        conflicts_date_buffer = settings['conflicts_date_buffer']
        query_s = 'perturbtrafic.pt_conflits_by_evenement_id_json({0}, {1})'.format(id, conflicts_date_buffer)
        query = request.dbsession.query(query_s).all()
        result = None
        if query and len(query) > 0:
            # Fragile reconstruction of JSON from the Python repr of the row
            # tuples: strip tuple punctuation, re-quote keys/values with
            # double quotes and blank out None.  Breaks if any data value
            # contains one of the replaced substrings -- TODO confirm the DB
            # function guarantees that cannot happen.
            result = str(query).replace('(', '').replace(',)', '').replace("{'", '{"').replace("':", '":').replace(
                ": '", ': "').replace(", '", ', "').replace("',", '",').replace("'}", '"}').replace('\\"', '"').replace(
                "None", '""')
            result = json.loads(result)
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return result
########################################################
# Conflits perturbation
########################################################
@view_config(route_name='conflits_perturabations', request_method='GET', renderer='json')
@view_config(route_name='conflits_perturabations_slash', request_method='GET', renderer='json')
def conflits_perturabations_view(request):
    """Return every perturbation conflict as parsed JSON.

    Calls the DB function ``perturbtrafic.pt_conflits_json`` and converts
    the stringified result rows back into a JSON value.  Returns ``None``
    (JSON null) when the query yields no rows, or an error dict on failure.
    """
    try:
        settings = request.registry.settings
        request.dbsession.execute('set search_path to ' + settings['schema_name'])
        conflicts_date_buffer = settings['conflicts_date_buffer']
        query_s = 'perturbtrafic.pt_conflits_json({0})'.format(conflicts_date_buffer)
        rows = request.dbsession.query(query_s).all()
        result = None
        if rows:
            # Rebuild valid JSON text from the Python repr of the row tuples
            # by applying the ordered replacements below: strip tuple
            # punctuation, switch single to double quotes, blank out None.
            text = str(rows)
            for old, new in (('(', ''), (',)', ''), ("{'", '{"'), ("':", '":'),
                             (": '", ': "'), (", '", ', "'), ("',", '",'),
                             ("'}", '"}'), ('\\"', '"'), ('None', '""')):
                text = text.replace(old, new)
            result = json.loads(text)
    except Exception as e:
        log.error(str(e))
        return {'error': 'true', 'code': 500, 'message': general_exception}
    return result
########################################################
# Common OPTION RESPONSE
########################################################
# @view_config(route_name='search_evenements', request_method='OPTIONS', renderer='json')
# @view_config(route_name='search_evenements_slash', request_method='OPTIONS', renderer='json')
@view_config(route_name='evenement_edition', request_method='OPTIONS', renderer='json')
@view_config(route_name='evenement_edition_slash', request_method='OPTIONS', renderer='json')
@view_config(route_name='perturbation_edition', request_method='OPTIONS', renderer='json')
@view_config(route_name='perturbation_edition_slash', request_method='OPTIONS', renderer='json')
@view_config(route_name='evenement_by_id', request_method='OPTIONS', renderer='json')
@view_config(route_name='perturbation_by_id', request_method='OPTIONS', renderer='json')
@view_config(route_name='contacts', request_method='OPTIONS', renderer='json')
@view_config(route_name='contacts_slash', request_method='OPTIONS', renderer='json')
@view_config(route_name='contact_by_id', request_method='OPTIONS', renderer='json')
@view_config(route_name='organismes', request_method='OPTIONS', renderer='json')
@view_config(route_name='organismes_slash', request_method='OPTIONS', renderer='json')
@view_config(route_name='organisme_by_id', request_method='OPTIONS', renderer='json')
@view_config(route_name='suggestion_by_liste_name', request_method='OPTIONS', renderer='json')
@view_config(route_name='contacts_potentiels_avis_perturbation', request_method='OPTIONS', renderer='json')
@view_config(route_name='contacts_potentiels_avis_perturbation_slash', request_method='OPTIONS', renderer='json')
@view_config(route_name='contacts_potentiels_avis_perturbation_by_id', request_method='OPTIONS', renderer='json')
@view_config(route_name='contacts_avis_fermeture_urgence', request_method='OPTIONS', renderer='json')
@view_config(route_name='contacts_avis_fermeture_urgence_slash', request_method='OPTIONS', renderer='json')
@view_config(route_name='contacts_avis_fermeture_urgence_by_id', request_method='OPTIONS', renderer='json')
@view_config(route_name='contact_avis_pr_touche', request_method='OPTIONS', renderer='json')
@view_config(route_name='contact_avis_pr_touche_slash', request_method='OPTIONS', renderer='json')
@view_config(route_name='contact_avis_pr_touche_by_id', request_method='OPTIONS', renderer='json')
@view_config(route_name='autorisation_by_id', request_method='OPTIONS', renderer='json')
@view_config(route_name='autorisations', request_method='OPTIONS', renderer='json')
@view_config(route_name='autorisations_slash', request_method='OPTIONS', renderer='json')
def options_response_view(request):
    """Answer preflight OPTIONS requests for all routes listed above.

    The body is intentionally empty; any CORS headers are presumably added
    elsewhere (e.g. by a response subscriber) -- TODO confirm.
    """
    return ''
########################################################
# Common IntegrityError return message
########################################################
@view_config(context=exc.IntegrityError, renderer='json')
def integrity_error(exc, request):
    """Render a SQLAlchemy IntegrityError as a JSON 500 payload.

    NOTE(review): unlike the sibling handlers below, this one echoes the
    raw exception text back to the client -- confirm this does not leak
    schema details.
    """
    detail = str(exc.orig) if hasattr(exc, 'orig') else str(exc)
    log.error(detail)
    return {'error': 'true', 'code': 500, 'message': str(exc)}
########################################################
# Common StatementError return message
########################################################
@view_config(context=exc.StatementError, renderer='json')
def statement_error(exc, request):
    """Render a SQLAlchemy StatementError as a generic JSON 500 payload."""
    detail = str(exc.orig) if hasattr(exc, 'orig') else str(exc)
    log.error(detail)
    return {'error': 'true', 'code': 500, 'message': general_exception}
########################################################
# Common ResourceClosedError return message
########################################################
@view_config(context=exc.ResourceClosedError, renderer='json')
def resource_closed_error(exc, request):
    """Render a SQLAlchemy ResourceClosedError as a generic JSON 500 payload."""
    detail = str(exc.orig) if hasattr(exc, 'orig') else str(exc)
    log.error(detail)
    return {'error': 'true', 'code': 500, 'message': general_exception}
########################################################
# Common InternalError return message
########################################################
@view_config(context=exc.InternalError, renderer='json')
def internal_error(exc, request):
    """Render a SQLAlchemy InternalError as a generic JSON 500 payload."""
    detail = str(exc.orig) if hasattr(exc, 'orig') else str(exc)
    log.error(detail)
    return {'error': 'true', 'code': 500, 'message': general_exception}
########################################################
# Common NoReferenceError return message
########################################################
@view_config(context=exc.NoReferenceError, renderer='json')
def noreference_error(exc, request):
    """Render a SQLAlchemy NoReferenceError as a generic JSON 500 payload."""
    detail = str(exc.orig) if hasattr(exc, 'orig') else str(exc)
    log.error(detail)
    return {'error': 'true', 'code': 500, 'message': general_exception}
########################################################
# Common InvalidRequestError, return message
########################################################
@view_config(context=exc.InvalidRequestError, renderer='json')
def invalid_request_error(exc, request):
    """Render a SQLAlchemy InvalidRequestError as a generic JSON 500 payload.

    Renamed from ``noreference_error``: four handlers shared that name, so
    each later definition shadowed the previous one at module level and
    only the last surviving function could be registered by the scanner.
    A unique name lets this handler be registered as intended.
    """
    log.error(str(exc.orig) if hasattr(exc, 'orig') else str(exc))
    return {'error': 'true', 'code': 500, 'message': general_exception}
########################################################
# Common DBAPIError return message
########################################################
@view_config(context=exc.DBAPIError, renderer='json')
def dbapi_error(exc, request):
    """Render a SQLAlchemy DBAPIError as a generic JSON 500 payload.

    Renamed from ``noreference_error``: four handlers shared that name, so
    each later definition shadowed the previous one at module level and
    only the last surviving function could be registered by the scanner.
    A unique name lets this handler be registered as intended.
    """
    log.error(str(exc.orig) if hasattr(exc, 'orig') else str(exc))
    return {'error': 'true', 'code': 500, 'message': general_exception}
########################################################
# Common SQLAlchemyError return message
########################################################
@view_config(context=exc.SQLAlchemyError, renderer='json')
def sqlalchemy_error(exc, request):
    """Render any other SQLAlchemyError as a generic JSON 500 payload.

    Renamed from ``noreference_error``: four handlers shared that name, so
    each later definition shadowed the previous one at module level and
    only the last surviving function could be registered by the scanner.
    A unique name lets this handler be registered as intended.
    """
    log.error(str(exc.orig) if hasattr(exc, 'orig') else str(exc))
    return {'error': 'true', 'code': 500, 'message': general_exception}
########################################################
# Common HTTPForbidden return message
########################################################
@view_config(context=HTTPForbidden, renderer='json')
def http_forbidden_error(exc, request):
    """Render HTTPForbidden as a JSON payload with an explicit 403 status."""
    detail = str(exc.orig) if hasattr(exc, 'orig') else str(exc)
    log.error(detail)
    request.response.status = 403
    return {'error': 'true', 'code': 403, 'message': not_authorized_exception}
# Operator-facing guidance used when the SQL database cannot be reached or
# has not been initialized.
db_err_msg = """\
Pyramid is having a problem using your SQL database. The problem
might be caused by one of the following things:
1. You may need to initialize your database tables with `alembic`.
Check your README.txt for descriptions and try to run it.
2. Your database server may not be running. Check that the
database server referred to by the "sqlalchemy.url" setting in
your "development.ini" file is running.
After you fix the problem, please restart the Pyramid application to
try it again.
"""
| 39.970723
| 335
| 0.604935
| 25,232
| 278,516
| 6.41701
| 0.03016
| 0.093296
| 0.051416
| 0.019362
| 0.855405
| 0.800518
| 0.77149
| 0.755852
| 0.733902
| 0.713663
| 0
| 0.001831
| 0.274379
| 278,516
| 6,967
| 336
| 39.97646
| 0.799319
| 0.039617
| 0
| 0.72143
| 0
| 0.000679
| 0.121017
| 0.035126
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020819
| false
| 0.000905
| 0.0043
| 0.000453
| 0.067436
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5145817298d356e9ae1bcd1844572d12739ee312
| 2,359
|
py
|
Python
|
tests/test_compare_networks.py
|
pawel-slowik/ipset-country
|
e14994eb44d977c59c147a81fabc4c3f3cd8c392
|
[
"MIT"
] | null | null | null |
tests/test_compare_networks.py
|
pawel-slowik/ipset-country
|
e14994eb44d977c59c147a81fabc4c3f3cd8c392
|
[
"MIT"
] | null | null | null |
tests/test_compare_networks.py
|
pawel-slowik/ipset-country
|
e14994eb44d977c59c147a81fabc4c3f3cd8c392
|
[
"MIT"
] | null | null | null |
import ipaddress
from ipset import compare_networks
def test_identical() -> None:
    """Identical inputs: one common network, nothing missing, zero diffs."""
    net = ipaddress.IPv4Network
    comparison = compare_networks([net("1.1.1.0/24")], [net("1.1.1.0/24")])
    assert list(comparison.common_networks) == [net("1.1.1.0/24")]
    assert not comparison.ipdeny_missing
    assert not comparison.ripestat_missing
    assert comparison.differences_count == 0
def test_ipdeny_missing() -> None:
    """A network present only in the second list is missing from ipdeny."""
    net = ipaddress.IPv4Network
    comparison = compare_networks(
        [net("1.1.1.0/24")],
        [net("1.1.1.0/24"), net("2.2.2.0/24")],
    )
    assert list(comparison.common_networks) == [net("1.1.1.0/24")]
    assert list(comparison.ipdeny_missing) == [net("2.2.2.0/24")]
    assert not comparison.ripestat_missing
    assert comparison.differences_count == 1
def test_ripestat_missing() -> None:
    """A network present only in the first list is missing from ripestat."""
    net = ipaddress.IPv4Network
    comparison = compare_networks(
        [net("1.1.1.0/24"), net("3.3.3.0/24")],
        [net("1.1.1.0/24")],
    )
    assert list(comparison.common_networks) == [net("1.1.1.0/24")]
    assert not comparison.ipdeny_missing
    assert list(comparison.ripestat_missing) == [net("3.3.3.0/24")]
    assert comparison.differences_count == 1
def test_both_missing() -> None:
    """Each side has one extra network: both missing lists are populated."""
    net = ipaddress.IPv4Network
    comparison = compare_networks(
        [net("1.1.1.0/24"), net("3.3.3.0/24")],
        [net("1.1.1.0/24"), net("2.2.2.0/24")],
    )
    assert list(comparison.common_networks) == [net("1.1.1.0/24")]
    assert list(comparison.ipdeny_missing) == [net("2.2.2.0/24")]
    assert list(comparison.ripestat_missing) == [net("3.3.3.0/24")]
    assert comparison.differences_count == 2
def test_disjoint() -> None:
    """Completely disjoint inputs: no common networks, two differences."""
    net = ipaddress.IPv4Network
    comparison = compare_networks(
        [net("3.3.3.0/24")],
        [net("2.2.2.0/24")],
    )
    assert not comparison.common_networks
    assert list(comparison.ipdeny_missing) == [net("2.2.2.0/24")]
    assert list(comparison.ripestat_missing) == [net("3.3.3.0/24")]
    assert comparison.differences_count == 2
| 40.672414
| 86
| 0.692666
| 313
| 2,359
| 5.111821
| 0.089457
| 0.3
| 0.084375
| 0.165
| 0.9275
| 0.9275
| 0.90875
| 0.89875
| 0.89875
| 0.815625
| 0
| 0.085899
| 0.146248
| 2,359
| 57
| 87
| 41.385965
| 0.70854
| 0
| 0
| 0.659574
| 0
| 0
| 0.101738
| 0
| 0
| 0
| 0
| 0
| 0.425532
| 1
| 0.106383
| false
| 0
| 0.042553
| 0
| 0.148936
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5ab802833f3799bb18f47ff51f1928c2f0b535be
| 105,979
|
py
|
Python
|
tests/test_activate.py
|
astrojuanlu/conda
|
badf048f5e8287250ef1940249a048f9bde08477
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_activate.py
|
astrojuanlu/conda
|
badf048f5e8287250ef1940249a048f9bde08477
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_activate.py
|
astrojuanlu/conda
|
badf048f5e8287250ef1940249a048f9bde08477
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, unicode_literals
import subprocess
import tempfile
import os
import stat
import sys
from textwrap import dedent
import re
import pytest
from conda.compat import TemporaryDirectory
from conda.config import root_dir, platform
from conda.install import symlink_conda
from conda.utils import path_identity, shells, on_win, translate_stream
from conda.cli.activate import binpath_from_arg
from tests.helpers import assert_equals, assert_in, assert_not_in
def gen_test_env_paths(envs, shell, num_test_folders=5):
    """Build the test-environment path dictionaries under *envs*.

    Callers need not use every folder; this only shortens environment
    string generation.  Conda is symlinked into the first two folders
    only.  Returns ``(paths, envname)``, both keyed by folder index plus
    the special entries ``"root"`` and ``"bad"``; ``envname`` maps each
    path to a shell statement setting CONDA_ENVNAME to it.
    """
    folders = [
        os.path.join(envs, "test {}".format(idx + 1))
        for idx in range(num_test_folders)
    ]
    # Create symlinks ONLY for the first two folders.
    for folder in folders[:2]:
        symlink_conda(folder, sys.prefix, shell)
    to_shell_path = shells[shell]["path_to"]
    paths = dict(enumerate(to_shell_path(folder) for folder in folders))
    paths["root"] = "root"
    paths["bad"] = "foo bar baz qux"
    envname = {
        key: shells[shell]["var_set"].format(variable="CONDA_ENVNAME", value=value)
        for key, value in paths.items()
    }
    return (paths, envname)
def _envpaths(env_root, env_name="", shelldict=None):
    """Supply the appropriate platform executable folders.

    Joining with the shell separator keeps no trailing slash when
    *env_name* is empty (the default).  Assumes that any prefix used here
    exists; will not work on prefixes that don't.

    *shelldict* now defaults to ``None`` and is replaced by a fresh dict
    per call: the previous mutable default argument (``shelldict={}``)
    was a single dict object shared across all calls.
    """
    if shelldict is None:
        shelldict = {}
    sep = shelldict['sep']
    return binpath_from_arg(sep.join([env_root, env_name]), shelldict=shelldict)
def print_ps1(env_dirs, base_prompt, number):
    """Render the expected prompt string: ``(<env>) <base_prompt>``."""
    selected_env = env_dirs[number]
    return u"({}) {}".format(selected_env, base_prompt)
def raw_string(s):
    """Return *s* with backslash-escape encoding applied to string input.

    Python 2 behaviour is unchanged: ``str`` -> ``'string-escape'``,
    ``unicode`` -> ``'unicode-escape'``.  On Python 3 the original raised
    (the ``'string-escape'`` codec and the name ``unicode`` no longer
    exist); now ``str`` is escape-encoded via ``'unicode-escape'`` and any
    other value is returned unchanged.
    """
    if isinstance(s, str):
        try:
            s = s.encode('string-escape')  # Python 2 byte strings
        except LookupError:
            # Python 3: 'string-escape' codec was removed
            s = s.encode('unicode-escape')
    else:
        try:
            if isinstance(s, unicode):  # noqa: F821 -- Python 2 only
                s = s.encode('unicode-escape')
        except NameError:
            pass  # Python 3: no `unicode` type; leave the value untouched
    return s
def strip_leading_library_bin(path_string, shelldict):
    """Drop a leading ``Library<sep>bin`` entry from a PATH-style string.

    Windows prepends Library/bin to PATH at startup; the tests compare
    PATH values without it.  Only the first entry is ever removed.
    """
    delim = shelldict['path_delim']
    entries = path_string.split(delim)
    marker = "library{}bin".format(shelldict['sep'])
    if marker in entries[0].lower():
        entries = entries[1:]
    return delim.join(entries)
def _format_vars(shell):
    """Build and return the per-shell substitution dict shared by the tests.

    Starts from ``shells[shell]`` and adds: the shell's base PATH and a
    fixed base prompt, conda's sys.prefix/binpath translated to the
    shell's path syntax, a common setup preamble (PYTHONPATH pointing at
    the repo, CONDARC and CONDA_DEFAULT_ENV cleared, prompt pinned) and
    the verbose/help flag spellings.  Note: mutates the shared shelldict.
    """
    shelldict = shells[shell]
    base_path, _ = run_in(shelldict['path_print'], shell)
    # windows forces Library/bin onto PATH when starting up. Strip it for the purposes of this test.
    if on_win:
        base_path = strip_leading_library_bin(base_path, shelldict)
    # base_prompt, _ = run_in(shelldict["prompt_print"], shell)
    base_prompt = "test_prompt"
    syspath = shelldict['path_to'](sys.prefix)
    binpath = shelldict['path_to'](shelldict['binpath'])
    # point PYTHONPATH at the repository root (parent of the tests folder)
    setenv_pythonpath=shelldict["envvar_set"].format(
        variable="PYTHONPATH",
        value=shelldict['path_to'](os.path.dirname(os.path.dirname(__file__))))
    # remove any conda RC references
    unsetenv_condarc=shelldict["envvar_unset"].format(
        variable="CONDARC")
    # clear any preset conda environment
    unsetenv_condadefaultenv=shelldict["envvar_unset"].format(
        variable="CONDA_DEFAULT_ENV")
    flags_verbose="{flag_single}v".format(**shelldict)
    flags_help="{flag_single}h".format(**shelldict)
    # set prompt such that we have a prompt to play
    # around and test with since most of the below
    # tests will not be invoked in an interactive
    # login shell and hence wont have the prompt initialized
    #
    # setting this here also means that we no longer have to
    # mess with the .bash_profile during testing to
    # standardize the base prompt
    prompt_set=shelldict["prompt_set"].format(
        value=base_prompt)
    command_setup = dedent("""\
        {setenv_pythonpath}
        {unsetenv_condarc}
        {unsetenv_condadefaultenv}
        {prompt_set}
        """).format(setenv_pythonpath=setenv_pythonpath,
                    unsetenv_condarc=unsetenv_condarc,
                    unsetenv_condadefaultenv=unsetenv_condadefaultenv,
                    prompt_set=prompt_set)
    # batch scripts must not echo every command they run
    if shelldict["suffix_script"] == '.bat':
        command_setup = "@ECHO OFF\n" + command_setup
    shelldict.update({
        'base_prompt': base_prompt,
        'syspath': syspath,
        'binpath': binpath,
        'command_setup': command_setup,
        'base_path': base_path,
        'flags_verbose': flags_verbose,
        'flags_help': flags_help,
    })
    return shelldict
@pytest.mark.installed
def test_activate_test1(shell):
    """Activating env "test 1" must put its bin directory on PATH.

    Exercised via the CONDA_ENVNAME environment variable and via a
    positional argument, for the shells that support each mechanism.
    """
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {}
                {{path_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}"
                {{path_print}}
                """)]
        for script in scripts:
            # first pass injects the activate command, second pass fills in
            # the env vars/dirs and the shared shell vocabulary
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_in(shell_vars['path_delim'].join(_envpaths(envs, 'test 1', shelldict=shell_vars)),
                      stdout, shell)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_noleftoverargs(shell):
    """Activation must introduce only the expected new variables.

    Snapshots the set of environment variable NAMES before and after
    activating, then checks the difference is exactly the documented
    CONDA_* variables (plus one MSYS2 variable on msys shells).
    """
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        # get env results before any changes
        commands = shell_vars['command_setup'] + dedent("""\
            {envvar_getall}
            """).format(
            **shell_vars)
        stdout_init, _ = run_in(commands, shell)
        stdout_init = set(s.split("=")[0] for s in stdout_init.split("\n"))
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {}
                {{envvar_getall}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}"
                {{envvar_getall}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            # compare variable names only; values of pre-existing
            # variables may legitimately change during activation
            stdout = set(s.split("=")[0] for s in stdout.split("\n"))
            stdout_diff = list(stdout - stdout_init)
            stdout_diff = [s for s in stdout_diff if not s.startswith("_")]
            print("commands:",commands)
            print("stdout_init:","\n".join(stdout_init))
            print("stdout:","\n".join(stdout))
            print("stdout_diff:","\n".join(stdout_diff))
            print("stderr:",stderr)
            # since this is the activate process we expect 3/4 new variables
            # since other variable's value may be updated we do not check for that
            if shell.endswith(".msys"):
                # CONDA_PREFIX,CONDA_PS1_BACKUP,CONDA_DEFAULT_ENV,MSYS2_ENV_CONV_EXCL
                assert len(stdout_diff) == 4
            else:
                # CONDA_PREFIX,CONDA_PS1_BACKUP,CONDA_DEFAULT_ENV
                assert len(stdout_diff) == 3
            assert "CONDA_PS1_BACKUP" in stdout_diff
            assert "CONDA_DEFAULT_ENV" in stdout_diff
            assert "CONDA_PREFIX" in stdout_diff
            assert_equals(stderr,'')
@pytest.mark.installed
def test_deactivate_noleftoverargs(shell):
    """Activate followed by deactivate must restore the environment exactly.

    Snapshots full NAME=VALUE lines (not just names) before, then after an
    activate/deactivate round trip, and requires zero differences.
    """
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        # get env results before any changes
        commands = shell_vars['command_setup'] + dedent("""\
            {envvar_getall}
            """).format(
            **shell_vars)
        stdout_init, _ = run_in(commands, shell)
        stdout_init = set(stdout_init.split("\n"))
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {}
                {}
                {{envvar_getall}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}"
                {}
                {{envvar_getall}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            stdout = set(stdout.split("\n"))
            stdout_diff = list(stdout - stdout_init)
            stdout_diff = [s for s in stdout_diff if not s.startswith("_")]
            print("commands:",commands)
            print("stdout_init:","\n".join(stdout_init))
            print("stdout:","\n".join(stdout))
            print("stdout_diff:","\n".join(stdout_diff))
            print("stderr:",stderr)
            # since this is the deactivate process we expect absolutely no differences
            # from the original environment, this includes the actual values of the
            # variables as well
            assert len(stdout_diff) == 0
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_env_from_env_with_root_activate(shell):
    """Activate env 0 via the root activate script, then activate env 1 via
    env 0's own activate script; PATH must end up pointing at 'test 2'."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # activate script that lives inside env 0 (symlinked/redirected there)
        src_activate_0 = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {} {{nul}}
                {{env_vars[1]}}
                {}
                {{path_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" {{nul}}
                {} "{{env_dirs[1]}}"
                {{path_print}}
                """)]
        for script in scripts:
            # stage 1: positional sources; stage 2: shell-specific placeholders
            script = script.format(src_activate,src_activate_0)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:", commands)
            print("stdout:", stdout)
            print("stderr:", stderr)
            assert_in(shell_vars['path_delim'].join(_envpaths(envs, 'test 2', shelldict=shell_vars)),
                     stdout, shell)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_bad_directory(shell):
    """Activating a non-existent environment (env_dirs[2] is never created)
    must emit a 'could not find environment' error and leave PATH untouched."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        # Strange semicolons are here to defeat MSYS' automatic path conversion.
        # See http://www.mingw.org/wiki/Posix_path_conversion
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[2]}}
                {}
                {{path_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[2]}}"
                {{path_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            # another semicolon here for comparison reasons with one above.
            # NOTE(review): lowercase 'could not find environment' here vs
            # capitalized 'Could not find environment' in sibling tests —
            # confirm which casing the scripts actually emit
            assert_in('could not find environment',stderr,shell)
            assert_not_in(env_dirs[2], stdout, shell)
@pytest.mark.installed
def test_activate_bad_env_keeps_existing_good_env(shell):
    """A failed activation of a bad env (env_dirs[2], never created) must not
    clobber the already-activated good env: PATH keeps pointing at 'test 1'."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # activate script living inside env 0 — used for the second (failing) call
        src_activate_0 = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {} {{nul}}
                {{env_vars[2]}}
                {}
                {{path_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" {{nul}}
                {} "{{env_dirs[2]}}"
                {{path_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_activate_0)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_in(shell_vars['path_delim'].join(_envpaths(envs, 'test 1', shelldict=shell_vars)),
                      stdout, shell)
            # NOTE(review): sibling assertions pass `shell` as third arg; confirm
            # whether assert_in's shell parameter is optional here
            assert_in("Could not find environment",stderr)
@pytest.mark.installed
def test_activate_deactivate(shell):
    """Activate env 0 then deactivate; PATH must return to the base path."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # deactivate script that lives inside env 0
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {} {{nul}}
                {}
                {{path_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" {{nul}}
                {}
                {{path_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            # normalize away any Library\bin prefix before comparing (Windows)
            stdout = strip_leading_library_bin(stdout, shell_vars)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, u"%s" % shell_vars['base_path'], stderr)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_root_simple(shell):
    """Activating 'root' must put the root environment's paths on PATH."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[root]}}
                {}
                {{path_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[root]}}"
                {{path_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            # root_dir is the module-level conda root prefix
            assert_in(shell_vars['path_delim'].join(_envpaths(root_dir, shelldict=shell_vars)),
                      stdout, shell)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_deactivate_root(shell):
    """Activate 'root' then deactivate; PATH must return to the base path."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # root deactivate (system path), unlike test_activate_deactivate which
        # uses the copy inside env 0
        src_deactivate = shell_vars['source'].format(
            "{syspath}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[root]}}
                {}
                {}
                {{path_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[root]}}"
                {}
                {{path_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            stdout = strip_leading_library_bin(stdout, shell_vars)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, u"%s" % shell_vars['base_path'], stderr)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_root_env_from_other_env(shell):
    """Activating root from within env 0 must replace env 0's paths with the
    root environment's paths on PATH."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # env 0's activate script — used for the second call (activate root)
        src_activate_0 = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {} {{nul}}
                {{env_vars[root]}}
                {}
                {{path_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" {{nul}}
                {} "{{env_dirs[root]}}"
                {{path_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_activate_0)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            # root env's paths must be present ...
            assert_in(shell_vars['path_delim'].join(_envpaths(root_dir, shelldict=shell_vars)),
                      stdout, shell)
            # ... and 'test 1' (env 0) paths must be gone
            assert_not_in(shell_vars['path_delim'].join(_envpaths(envs, 'test 1', shelldict=shell_vars)),
                          stdout, shell)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_wrong_args(shell):
    """Passing two positional args to activate must error out and leave PATH
    at the base path."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        # cannot accidentally pass too many args to program when setting environment variables
        scripts += []
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} two args
                {{path_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            stdout = strip_leading_library_bin(stdout, shell_vars)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, u"%s" % shell_vars['base_path'], stderr)
            # NOTE(review): capitalization 'Unknown/Invalid' here differs from
            # 'Unknown/invalid' asserted in test_PS1 — confirm the actual message
            assert_in("[ACTIVATE]: ERROR: Unknown/Invalid flag/parameter (args)",
                     stderr, shell)
@pytest.mark.installed
def test_activate_check_sourcing(shell):
    """Running (not sourcing) activate must fail with the 'only supports
    sourcing' error; only meaningful on UNIX shells."""
    if shell in ['powershell.exe', 'cmd.exe']:
        pytest.skip("the concept of sourcing to modify one's current environment is only applicable for UNIX")
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        # deliberately NOT wrapped in shell_vars['source'] — the point of this
        # test is to execute the script directly instead of sourcing it
        src_activate = "{syspath}{binpath}activate{suffix_executable}"
        # all unix shells support environment variables instead of parameter passing
        scripts += [dedent("""\
            {{env_vars[0]}}
            {}
            """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}"
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, '', stderr)
            assert_in(dedent("""\
                [ACTIVATE]: ERROR: Only supports sourcing from tcsh/csh and bash/zsh/dash/posh/ksh."""),
                stderr, shell)
@pytest.mark.installed
def test_activate_help(shell):
    """Requesting help (CONDA_HELP=true or the help flag) must print the
    shell-appropriate usage string to stderr and nothing to stdout."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{help_var}}
                {}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} {{flags_help}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                help_var=shell_vars["var_set"].format(variable="CONDA_HELP",value="true"),
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, '', stderr)
            # usage text varies per shell family
            if shell in ["cmd.exe"]:
                assert_in('Usage: activate [ENV] [/h] [/v]', stderr, shell)
            elif shell in ["powershell.exe"]:
                assert_in('Usage: activate [ENV] [-h] [-v]', stderr, shell)
            elif shell in ["csh","tcsh"]:
                assert_in('Usage: source "`which activate`" [ENV] [-h] [-v]', stderr, shell)
            else:
                assert_in('Usage: . activate [ENV] [-h] [-v]', stderr, shell)
@pytest.mark.installed
def test_deactivate_check_sourcing(shell):
    """Running (not sourcing) deactivate must fail with the 'only supports
    sourcing' error; only meaningful on UNIX shells."""
    if shell in ['powershell.exe', 'cmd.exe']:
        pytest.skip("the concept of sourcing to modify one's current environment is only applicable for UNIX")
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        # deliberately NOT wrapped in shell_vars['source'] — executed directly
        src_deactivate = "{syspath}{binpath}deactivate{suffix_executable}"
        # since this is just the deactivate then no special testing is necessary
        # for environment variables vs. parameter passing
        scripts += [dedent("""\
            {}
            """)]
        for script in scripts:
            script = script.format(src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, '', stderr)
            assert_in(dedent("""\
                [DEACTIVATE]: ERROR: Only supports sourcing from tcsh/csh and bash/zsh/dash/posh/ksh."""),
                stderr, shell)
@pytest.mark.installed
def test_deactivate_help(shell):
    """Requesting deactivate help (CONDA_HELP=true or the help flag) must
    print the shell-appropriate usage string to stderr and nothing to stdout.

    Fix: the powershell branch checked ``shell in ["powershell"]`` while every
    other shell comparison in this file uses the value ``"powershell.exe"``
    (e.g. test_activate_help), so the branch could never match and powershell
    fell through to the generic ``else`` assertion.
    """
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_deactivate = shell_vars['source'].format(
            "{syspath}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{help_var}}
                {}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} {{flags_help}}
                """)]
        for script in scripts:
            # stage 1: positional source command; stage 2: shell placeholders
            script = script.format(src_deactivate)
            script = script.format(
                help_var=shell_vars["var_set"].format(variable="CONDA_HELP",value="true"),
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, '', stderr)
            # usage text varies per shell family
            if shell in ["cmd.exe"]:
                assert_in('Usage: deactivate [/h] [/v]', stderr, shell)
            elif shell in ["powershell.exe"]:
                assert_in('Usage: deactivate [-h] [-v]', stderr, shell)
            elif shell in ["csh","tcsh"]:
                assert_in('Usage: source "`which deactivate`" [-h] [-v]', stderr, shell)
            else:
                assert_in('Usage: . deactivate [-h] [-v]', stderr, shell)
@pytest.mark.installed
def test_activate_symlinking(shell):
    """Symlinks or bat file redirects are created at activation time. Make sure that the
    files/links exist, and that they point where they should."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        # envs 0 and 1 should each carry conda/activate/deactivate redirects
        for k in [0,1]:
            for f in ["conda", "activate", "deactivate"]:
                file_path = "{env_dir}{binpath}{f}{suffix_executable}".format(
                        env_dir=env_dirs[k],
                        f=f,
                        **shell_vars)
                if on_win:
                    # must translate path to windows representation for Python's sake
                    file_path = shell_vars["path_from"](file_path)
                    print("on_win:")
                    print("file_path:",file_path)
                    assert(os.path.lexists(file_path))
                else:
                    real_path = "{syspath}{binpath}{f}{suffix_executable}".format(
                            f=f,
                            **shell_vars)
                    print("not on_win:")
                    print("file_path:",file_path)
                    print("real_path:",real_path)
                    # on UNIX the redirect must be a symlink pointing at the
                    # corresponding script in the root prefix
                    assert(os.path.lexists(file_path))
                    assert(stat.S_ISLNK(os.lstat(file_path).st_mode))
                    assert(os.readlink(file_path) == real_path)
        if not on_win:
            # test activate when there are no write permissions in the env
            scripts = []
            src_activate = shell_vars['source'].format(
                "{syspath}{binpath}activate{suffix_executable}")
            # all unix shells support environment variables instead of parameter passing
            scripts += [dedent("""\
                mkdir -p "{{env_dirs[2]}}{{binpath}}"
                chmod 444 "{{env_dirs[2]}}{{binpath}}"
                {{env_vars[2]}}
                {}
                """)]
            # most unix shells support parameter passing, dash is the exception
            if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
                scripts += [dedent("""\
                    mkdir -p "{{env_dirs[2]}}{{binpath}}"
                    chmod 444 "{{env_dirs[2]}}{{binpath}}"
                    {} "{{env_dirs[2]}}"
                    """)]
            for script in scripts:
                script = script.format(src_activate)
                script = script.format(
                    env_vars=env_vars,
                    env_dirs=env_dirs,
                    **shell_vars)
                commands = shell_vars['command_setup'] + script
                stdout, stderr = run_in(commands, shell)
                print("commands:",commands)
                print("stdout:",stdout)
                print("stderr:",stderr)
                assert_equals(stdout,'')
                assert_in("not have write access", stderr, shell)
                # restore permissions so the dir will get cleaned up
                commands = dedent("""\
                    chmod 777 "{env_dirs[2]}{binpath}"
                    """).format(
                    env_vars=env_vars,
                    env_dirs=env_dirs,
                    **shell_vars)
                run_in(commands, shell)
@pytest.mark.installed
def test_PS1(shell):
    """Exercise prompt (PS1) handling across seven scenarios: activate sets
    the prompt; re-activate replaces it; failed activates leave it alone;
    bare deactivate and post-activate deactivate restore the base prompt;
    and bad arguments leave it unchanged.

    Fixes over the original:
    * TEST 2 unpacked ``stdout, sterr = run_in(...)`` (typo), so its
      prints/asserts silently reused ``stderr`` from TEST 1.
    * TEST 4 began ``if script in scripts:`` instead of ``for script in
      scripts:`` — a membership test on TEST 3's fully-formatted leftover
      ``script`` string, which is never an element of the fresh (unformatted)
      ``scripts`` list, so TEST 4's body never executed.
    """
    if shell in ['powershell.exe']:
        pytest.skip("powershell.exe doesn't support prompt modifications yet")
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        #-----------------------------------------------------------------------
        # TEST 1: activate changes PS1 correctly
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {}
                {{prompt_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}"
                {{prompt_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, print_ps1(env_dirs=env_dirs,
                                            base_prompt=shell_vars["base_prompt"],
                                            number=0), stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 2: second activate replaces earlier activated env PS1
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_activate_0 = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {} {{nul}}
                {{env_vars[1]}}
                {}
                {{prompt_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" {{nul}}
                {} "{{env_dirs[1]}}"
                {{prompt_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_activate_0)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            # fixed: was "stdout, sterr = ..." which leaked TEST 1's stderr
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, print_ps1(env_dirs=env_dirs,
                                            base_prompt=shell_vars["base_prompt"],
                                            number=1), stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 3: failed activate does not touch raw PS1
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[2]}}
                {}
                {{prompt_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[2]}}"
                {{prompt_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars['base_prompt'], stderr)
            assert_in("Could not find environment",stderr)
        #-----------------------------------------------------------------------
        # TEST 4: ensure that a failed activate does not touch PS1
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_activate_0 = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {} {{nul}}
                {{env_vars[2]}}
                {}
                {{prompt_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" {{nul}}
                {} "{{env_dirs[2]}}"
                {{prompt_print}}
                """)]
        # fixed: was "if script in scripts:", which never ran the loop body
        for script in scripts:
            script = script.format(src_activate,src_activate_0)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, print_ps1(env_dirs=env_dirs,
                                            base_prompt=shell_vars["base_prompt"],
                                            number=0), stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 5: deactivate doesn't do anything bad to PS1 when no env active to deactivate
        #-----------------------------------------------------------------------
        scripts = []
        src_deactivate = shell_vars['source'].format(
            "{syspath}{binpath}deactivate{suffix_executable}")
        # since this is just the deactivate then no special testing is necessary
        # for environment variables vs. parameter passing
        scripts += [dedent("""\
            {}
            {{prompt_print}}
            """)]
        for script in scripts:
            script = script.format(src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars['base_prompt'], stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 6: deactivate script in activated env returns us to raw PS1
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {} {{nul}}
                {}
                {{prompt_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" {{nul}}
                {}
                {{prompt_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars['base_prompt'], stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 7: make sure PS1 is unchanged by faulty activate input
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        # cannot accidentally pass too many args to program when setting environment variables
        scripts += []
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} two args
                {{prompt_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars['base_prompt'], stderr)
            assert_in('[ACTIVATE]: ERROR: Unknown/invalid flag/parameter',stderr)
@pytest.mark.installed
def test_PS1_no_changeps1(shell):
    """Ensure that people's PS1 remains unchanged if they have that setting in their RC file."""
    if shell in ['powershell.exe']:
        pytest.skip("powershell.exe doesn't support prompt modifications yet")
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        # write a condarc that disables prompt modification and build the shell
        # snippet that exports CONDARC pointing at it; every scripted shell
        # below runs that snippet first
        rc_file = os.path.join(envs, ".condarc")
        with open(rc_file, 'w') as f:
            f.write("changeps1: False\n")
        setenv_condarc = shell_vars["envvar_set"].format(
            variable="CONDARC",
            value=rc_file)
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        #-----------------------------------------------------------------------
        # TEST 1: activate changes PS1 correctly
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {{env_vars[0]}}
                {}
                {{prompt_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {} "{{env_dirs[0]}}"
                {{prompt_print}}
                """)]
        for script in scripts:
            # two-stage formatting: first drop in the source-activate command,
            # then fill the doubled-brace placeholders with the env/shell values
            script = script.format(src_activate)
            script = script.format(
                setenv_condarc=setenv_condarc,
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars["base_prompt"], stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 2: second activate replaces earlier activated env PS1
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_activate_0 = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {{env_vars[0]}}
                {} {{nul}}
                {{env_vars[1]}}
                {}
                {{prompt_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {} "{{env_dirs[0]}}" {{nul}}
                {} "{{env_dirs[1]}}"
                {{prompt_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_activate_0)
            script = script.format(
                setenv_condarc=setenv_condarc,
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            # BUG FIX: was "stdout, sterr = run_in(...)" — the typo left stderr
            # holding a stale value, so the prints and asserts below never saw
            # this run's actual error output
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars["base_prompt"], stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 3: failed activate does not touch raw PS1
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {{env_vars[2]}}
                {}
                {{prompt_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {} "{{env_dirs[2]}}"
                {{prompt_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                setenv_condarc=setenv_condarc,
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars["base_prompt"], stderr)
            assert_in("Could not find environment",stderr)
        #-----------------------------------------------------------------------
        # TEST 4: ensure that a failed activate does not touch PS1
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_activate_0 = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {{env_vars[0]}}
                {} {{nul}}
                {{env_vars[2]}}
                {}
                {{prompt_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {} "{{env_dirs[0]}}" {{nul}}
                {} "{{env_dirs[2]}}"
                {{prompt_print}}
                """)]
        # BUG FIX: this loop read "if script in scripts:" — TEST 4 never
        # executed (and would have raised NameError on an unbound "script"
        # in a fresh run)
        for script in scripts:
            script = script.format(src_activate,src_activate_0)
            script = script.format(
                setenv_condarc=setenv_condarc,
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars["base_prompt"], stderr)
            # BUG FIX: was assert_equals(stderr,'') — but the second activate
            # targets the nonexistent env_dirs[2] and must report the failure
            # (same expectation as TEST 4 of test_CONDA_DEFAULT_ENV)
            assert_in("Could not find environment",stderr)
        #-----------------------------------------------------------------------
        # TEST 5: deactivate doesn't do anything bad to PS1 when no env active to deactivate
        #-----------------------------------------------------------------------
        scripts = []
        src_deactivate = shell_vars['source'].format(
            "{syspath}{binpath}deactivate{suffix_executable}")
        # since this is just the deactivate then no special testing is necessary
        # for environment variables vs. parameter passing
        scripts += [dedent("""\
            {{setenv_condarc}}
            {}
            {{prompt_print}}
            """)]
        for script in scripts:
            script = script.format(src_deactivate)
            script = script.format(
                setenv_condarc=setenv_condarc,
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars["base_prompt"], stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 6: deactivate script in activated env returns us to raw PS1
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {{env_vars[0]}}
                {} {{nul}}
                {}
                {{prompt_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {} "{{env_dirs[0]}}" {{nul}}
                {}
                {{prompt_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_deactivate)
            script = script.format(
                setenv_condarc=setenv_condarc,
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars['base_prompt'], stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 7: make sure PS1 is unchanged by faulty activate input
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        # cannot accidentally pass too many args to program when setting environment variables
        scripts += []
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {{setenv_condarc}}
                {} two args
                {{prompt_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                setenv_condarc=setenv_condarc,
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, shell_vars["base_prompt"], stderr)
            assert_in('[ACTIVATE]: ERROR: Unknown/invalid flag/parameter',stderr)
@pytest.mark.installed
def test_CONDA_DEFAULT_ENV(shell):
    """Verify that activate/deactivate manage CONDA_DEFAULT_ENV correctly in every shell."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        #-----------------------------------------------------------------------
        # TEST 1: activate sets CONDA_DEFAULT_ENV correctly
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {}
                {{defaultenv_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}"
                {{defaultenv_print}}
                """)]
        for script in scripts:
            # two-stage formatting: first drop in the source-activate command,
            # then fill the doubled-brace placeholders with the env/shell values
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout.rstrip(), env_dirs[0], stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 2: second activate replaces earlier activated env CONDA_DEFAULT_ENV
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_activate_0 = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {} {{nul}}
                {{env_vars[1]}}
                {}
                {{defaultenv_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" {{nul}}
                {} "{{env_dirs[1]}}"
                {{defaultenv_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_activate_0)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout.rstrip(), env_dirs[1], stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 3: failed activate does not set CONDA_DEFAULT_ENV
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[2]}}
                {}
                {{defaultenv_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[2]}}"
                {{defaultenv_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, '', stderr)
            assert_in("Could not find environment",stderr)
        #-----------------------------------------------------------------------
        # TEST 4: ensure that a failed activate does not overwrite CONDA_DEFAULT_ENV
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_activate_0 = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {} {{nul}}
                {{env_vars[2]}}
                {}
                {{defaultenv_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" {{nul}}
                {} "{{env_dirs[2]}}"
                {{defaultenv_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_activate_0)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout.rstrip(), env_dirs[0], stderr)
            assert_in("Could not find environment",stderr)
        #-----------------------------------------------------------------------
        # TEST 5: deactivate doesn't set CONDA_DEFAULT_ENV when no env active to deactivate
        #-----------------------------------------------------------------------
        scripts = []
        src_deactivate = shell_vars['source'].format(
            "{syspath}{binpath}deactivate{suffix_executable}")
        # since this is just the deactivate then no special testing is necessary
        # for environment variables vs. parameter passing
        scripts += [dedent("""\
            {}
            {{envvar_getall}}
            """)]
        for script in scripts:
            script = script.format(src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            # keep only the variable names from the NAME=value dump
            stdout = [s.split("=")[0] for s in stdout.split("\n")]
            print("commands:",commands)
            print("stdout:","\n".join(stdout))
            print("stderr:",stderr)
            # BUG FIX: message used to say "cannot find", but this assert fires
            # when the variable IS still present
            assert "CONDA_DEFAULT_ENV" not in stdout, "{} found CONDA_DEFAULT_ENV in environment".format(stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 6: deactivate script in activated env unsets CONDA_DEFAULT_ENV
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {} {{nul}}
                {}
                {{envvar_getall}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            # BUG FIX: the activate call was missing its environment argument
            # ("{} {{nul}}"), so nothing was activated before the deactivate;
            # every sibling TEST 6 passes "{{env_dirs[0]}}" here
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" {{nul}}
                {}
                {{envvar_getall}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            stdout = [s.split("=")[0] for s in stdout.split("\n")]
            print("commands:",commands)
            # consistency: print the names one per line like the other tests
            print("stdout:","\n".join(stdout))
            print("stderr:",stderr)
            # BUG FIX: inverted assertion message, as in TEST 5
            assert "CONDA_DEFAULT_ENV" not in stdout, "{} found CONDA_DEFAULT_ENV in environment".format(stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 7: make sure CONDA_DEFAULT_ENV is not set by faulty activate input
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        # cannot accidentally pass too many args to program when setting environment variables
        scripts += []
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} two args
                {{defaultenv_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout, '', stderr)
            assert_in('[ACTIVATE]: ERROR: Unknown/invalid flag/parameter',stderr)
        #-----------------------------------------------------------------------
        # TEST 8: activating root sets CONDA_DEFAULT_ENV correctly
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[root]}}
                {} {{nul}}
                {{defaultenv_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[root]}}" {{nul}}
                {{defaultenv_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout.rstrip(), 'root', stderr)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST 9: activating and deactivating from root unsets CONDA_DEFAULT_ENV correctly
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[root]}}
                {} {{nul}}
                {} {{nul}}
                {{envvar_getall}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[root]}}" {{nul}}
                {} {{nul}}
                {{envvar_getall}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            stdout = [s.split("=")[0] for s in stdout.split("\n")]
            print("commands:",commands)
            print("stdout:","\n".join(stdout))
            print("stderr:",stderr)
            # BUG FIX: inverted assertion message, as in TEST 5
            assert "CONDA_DEFAULT_ENV" not in stdout, "{} found CONDA_DEFAULT_ENV in environment".format(stderr)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_from_env(shell):
    """Tests whether the activate bat file or link in the activated environment works OK"""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs, env_vars = gen_test_env_paths(envs, shell)
        # activate command from the system install, and the one linked inside env 0
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_activate_0 = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}activate{suffix_executable}")
        templates = []
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe", "bash.exe"]:
            templates.append(dedent("""\
                {{env_vars[0]}}
                {}
                {{env_vars[1]}}
                {}
                {{defaultenv_print}}
                """))
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash", "sh", "csh", "posh"]:
            templates.append(dedent("""\
                {} "{{env_dirs[0]}}"
                {} "{{env_dirs[1]}}"
                {{defaultenv_print}}
                """))
        for template in templates:
            # stage one inserts the two activate commands; stage two resolves
            # the doubled-brace placeholders against the env/shell values
            rendered = template.format(src_activate, src_activate_0).format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + rendered
            stdout, stderr = run_in(commands, shell)
            print("commands:", commands)
            print("stdout:", stdout)
            print("stderr:", stderr)
            # rstrip on output is because the printing to console picks up an extra space
            assert_equals(stdout.rstrip(), env_dirs[1], stderr)
            assert_equals(stderr, '')
@pytest.mark.installed
def test_deactivate_from_env(shell):
    """Tests whether the deactivate bat file or link in the activated environment works OK"""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # deactivate via the link inside the activated environment itself
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {}
                {}
                {{envvar_getall}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}"
                {}
                {{envvar_getall}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_deactivate)
            # CLEANUP: dropped the unused var="CONDA_DEFAULT_ENV" kwarg — no
            # template references {var}
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            # keep only the variable names from the NAME=value dump
            stdout = [s.split("=")[0] for s in stdout.split("\n")]
            print("commands:",commands)
            print("stdout:","\n".join(stdout))
            print("stderr:",stderr)
            # BUG FIX: message used to say "cannot find", but this assert fires
            # when CONDA_DEFAULT_ENV *is* still present after deactivation
            assert "CONDA_DEFAULT_ENV" not in stdout, "{} found CONDA_DEFAULT_ENV in environment".format(stderr)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_relative_path(shell):
    """
    current directory should be searched for environments
    """
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        # split env 0 into its parent directory and bare name so activate must
        # resolve the name relative to the current directory
        work_dir = os.path.dirname(env_dirs[0])
        env_dir = os.path.basename(env_dirs[0])
        env_var = shell_vars["var_set"].format(variable="CONDA_ENVNAME",value=env_dir)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                cd {{work_dir}}
                {{env_var}}
                {}
                {{defaultenv_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                cd {{work_dir}}
                {} "{{env_dir}}"
                {{defaultenv_print}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                work_dir=work_dir,
                env_var=env_var,
                env_dir=env_dir,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            cwd = os.getcwd()
            # this is not effective for running bash on windows. It starts
            # in your home dir no matter what. That's what the cd is for above.
            os.chdir(envs)
            # IDIOM FIX: the original "try/except: raise/finally" had a bare
            # except that only re-raised — try/finally alone restores the cwd
            try:
                stdout, stderr = run_in(commands, shell, cwd=envs)
            finally:
                os.chdir(cwd)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            assert_equals(stdout.rstrip(), env_dir, stderr)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_does_not_leak_echo_setting(shell):
    """Test that activate's setting of echo to off does not disrupt later echo calls"""
    if not on_win or shell != "cmd.exe":
        pytest.skip("echo leaking is only relevant on Window's CMD.EXE")
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs, env_vars = gen_test_env_paths(envs, shell)
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # since we are only testing for cmd.exe only need to check for parameter passing
        templates = [dedent("""\
            @ECHO ON
            {} "{{env_dirs[0]}}"
            @ECHO
            """)]
        for template in templates:
            # insert the activate command, then resolve the remaining placeholders
            rendered = template.format(src_activate).format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + rendered
            stdout, stderr = run_in(commands, shell)
            print("commands:", commands)
            print("stdout:", stdout)
            print("stderr:", stderr)
            # the trailing bare @ECHO reports the current echo state
            assert_equals(stdout, u'ECHO is on.', stderr)
            assert_equals(stderr, '')
@pytest.mark.skip(reason="I just can't with this test right now.")
@pytest.mark.installed
def test_activate_non_ascii_char_in_path(shell):
    """Round-trip activate then deactivate inside a prefix containing a non-ASCII character."""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='Ånvs', dir=os.path.dirname(__file__)) as envs:
        env_dirs, env_vars = gen_test_env_paths(envs, shell)
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        templates = []
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe", "bash.exe"]:
            templates.append(dedent("""\
                {{env_vars[0]}}
                {}
                {}
                {{defaultenv_print}}.
                """))
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash", "sh", "csh", "posh"]:
            templates.append(dedent("""\
                {} "{{env_dirs[0]}}"
                {}
                {{defaultenv_print}}.
                """))
        for template in templates:
            # insert activate/deactivate, then resolve the remaining placeholders
            rendered = template.format(src_activate, src_deactivate).format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + rendered
            stdout, stderr = run_in(commands, shell)
            print("commands:", commands)
            print("stdout:", stdout)
            print("stderr:", stderr)
            # only the sentinel "." should remain once the env is deactivated
            assert_equals(stdout, u'.', stderr)
            assert_equals(stderr, '')
@pytest.mark.installed
def test_activate_has_extra_env_vars(shell):
    """Test that environment variables in activate.d show up when activated"""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        testvariable="TEST_VAR"
        testvalue="test"

        def _write_hook(subdir, body):
            # write a hook script into env 0's etc/conda/<subdir> and echo it
            # back for debugging; shared by the activate.d/deactivate.d setup
            # (IDIOM FIX: the original duplicated this and used locals named
            # "dir" and "file", shadowing the builtins)
            hook_dir = os.path.join(shell_vars['path_from'](env_dirs[0]), "etc", "conda", subdir)
            os.makedirs(hook_dir)
            hook_file = os.path.join(hook_dir, "test{}".format(shell_vars["suffix_script"]))
            with open(hook_file, 'w') as f:
                # do long winded format to ensure that script ends with a newline
                f.write(dedent("""\
                    {}
                    """).format(body))
            with open(hook_file, 'r') as f:
                print(f.read())

        _write_hook("activate.d", shell_vars["envvar_set"].format(
            variable=testvariable,
            value=testvalue))
        _write_hook("deactivate.d", shell_vars["envvar_unset"].format(
            variable=testvariable))
        #-----------------------------------------------------------------------
        # TEST ACTIVATE
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {}
                {{envvar_getall}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}"
                {{envvar_getall}}
                """)]
        for script in scripts:
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            # keep only the variable names from the NAME=value dump
            stdout = [s.split("=")[0] for s in stdout.split("\n")]
            print("commands:",commands)
            print("stdout:","\n".join(stdout))
            print("stderr:",stderr)
            assert testvariable in stdout, "{} cannot find {} in environment".format(stderr,testvariable)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST DEACTIVATE
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {}
                {}
                {{envvar_getall}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}"
                {}
                {{envvar_getall}}
                """)]
        for script in scripts:
            script = script.format(src_activate,src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            stdout = [s.split("=")[0] for s in stdout.split("\n")]
            print("commands:",commands)
            print("stdout:","\n".join(stdout))
            print("stderr:",stderr)
            # BUG FIX: message used to say "cannot find", but this assert fires
            # when the deactivate.d hook failed to unset the variable
            assert testvariable not in stdout, "{} found {} in environment".format(stderr,testvariable)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_verbose(shell):
    """Test that environment variables in activate.d show up when activated"""
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        testvariable="TEST_VAR"
        testvalue="test"
        # create an activate.d hook script that sets TEST_VAR on activation
        dir=os.path.join(shell_vars['path_from'](env_dirs[0]), "etc", "conda", "activate.d")
        os.makedirs(dir)
        file="test{}".format(shell_vars["suffix_script"])
        file=os.path.join(dir,file)
        with open(file, 'w') as f:
            f.write(shell_vars["envvar_set"].format(
                variable=testvariable,
                value=testvalue))
        # and a matching deactivate.d hook script that unsets it again
        dir=os.path.join(shell_vars['path_from'](env_dirs[0]), "etc", "conda", "deactivate.d")
        os.makedirs(dir)
        file="test{}".format(shell_vars["suffix_script"])
        file=os.path.join(dir,file)
        with open(file, 'w') as f:
            f.write(shell_vars["envvar_unset"].format(
                variable=testvariable))
        #-----------------------------------------------------------------------
        # TEST ACTIVATE
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {{verbose_var}}
                {}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}" "{{flags_verbose}}"
                """)]
        for script in scripts:
            # two-pass formatting: the first pass splices the activate command
            # into the bare {} slot; the second expands the doubly-braced
            # env/shell placeholders left over from the first pass
            script = script.format(src_activate)
            script = script.format(
                verbose_var=shell_vars["var_set"].format(variable="CONDA_VERBOSE",value="true"),
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            # verbose activation must report that the activate.d hook was sourced
            assert_in('[ACTIVATE]: Sourcing',stdout,shell)
            assert_equals(stderr,'')
        #-----------------------------------------------------------------------
        # TEST DEACTIVATE
        #-----------------------------------------------------------------------
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{env_vars[0]}}
                {}
                {{verbose_var}}
                {}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {} "{{env_dirs[0]}}"
                {} "{{flags_verbose}}"
                """)]
        for script in scripts:
            # first pass fills activate + deactivate commands, second pass the rest
            script = script.format(src_activate,src_deactivate)
            script = script.format(
                verbose_var=shell_vars["var_set"].format(variable="CONDA_VERBOSE",value="true"),
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            # verbose deactivation must report that the deactivate.d hook was sourced
            assert_in('[DEACTIVATE]: Sourcing',stdout,shell)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_noPS1(shell):
    """Activation must still prepend the env to PATH when the shell's prompt
    variable is unset (activate touches the prompt only if it exists)."""
    if shell in ['powershell.exe']:
        pytest.skip("powershell.exe doesn't support prompt modifications yet")
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # all unix shells support environment variables instead of parameter passing
        # windows supports this but is complicated in how it works and hence difficult to test
        if shell not in ["cmd.exe","bash.exe"]:
            scripts += [dedent("""\
                {{prompt_unset}}
                {{env_vars[0]}}
                {}
                {{path_print}}
                """)]
        # most unix shells support parameter passing, dash is the exception
        if shell.split(".")[0] not in ["dash","sh","csh","posh"]:
            scripts += [dedent("""\
                {{prompt_unset}}
                {} "{{env_dirs[0]}}"
                {{path_print}}
                """)]
        for script in scripts:
            # two-pass formatting: splice the activate command, then expand
            # the doubly-braced env/shell placeholders
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell)
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            # the env's bin directories must appear on the printed PATH
            assert_in(shell_vars['path_delim'].join(_envpaths(envs, 'test 1', shelldict=shell_vars)),
                      stdout, shell)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_activate_with_e(shell):
    """Activation must work when the shell runs with ``-e`` (exit on error),
    as conda-build does when sourcing activate."""
    if shell.split(".")[0] not in ["bash"]:
        pytest.skip("-e only available on bash")
    # in certain cases it is desired to run activate with -e (as is done
    # when running conda-build)
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        # variant 1: select the env through an environment variable
        scripts += [dedent("""\
            {{env_vars[0]}}
            {}
            {{path_print}}
            """)]
        # variant 2: select the env through a positional parameter
        scripts += [dedent("""\
            {} "{{env_dirs[0]}}"
            {{path_print}}
            """)]
        for script in scripts:
            # two-pass formatting: splice the activate command, then expand
            # the doubly-braced env/shell placeholders
            script = script.format(src_activate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell, extra_args="-e")
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            # the env's bin directories must appear on the printed PATH
            assert_in(shell_vars['path_delim'].join(_envpaths(envs, 'test 1', shelldict=shell_vars)),
                      stdout, shell)
            assert_equals(stderr,'')
@pytest.mark.installed
def test_deactivate_with_e(shell):
    """Activate followed by deactivate under ``-e`` must restore the original
    PATH without the shell aborting."""
    if shell.split(".")[0] not in ["bash"]:
        pytest.skip("-e only available on bash")
    # in certain cases it is desired to run activate with -e (as is done
    # when running conda-build)
    shell_vars = _format_vars(shell)
    with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
        env_dirs,env_vars=gen_test_env_paths(envs, shell)
        scripts = []
        src_activate = shell_vars['source'].format(
            "{syspath}{binpath}activate{suffix_executable}")
        src_deactivate = shell_vars['source'].format(
            "{env_dirs[0]}{binpath}deactivate{suffix_executable}")
        # variant 1: select the env through an environment variable
        scripts += [dedent("""\
            {{env_vars[0]}}
            {}
            {}
            {{path_print}}
            """)]
        # variant 2: select the env through a positional parameter
        scripts += [dedent("""\
            {} "{{env_dirs[0]}}"
            {}
            {{path_print}}
            """)]
        for script in scripts:
            # first pass fills activate + deactivate commands, second pass the rest
            script = script.format(src_activate,src_deactivate)
            script = script.format(
                env_vars=env_vars,
                env_dirs=env_dirs,
                **shell_vars)
            commands = shell_vars['command_setup'] + script
            stdout, stderr = run_in(commands, shell, extra_args="-e")
            print("commands:",commands)
            print("stdout:",stdout)
            print("stderr:",stderr)
            # after deactivate, PATH must be back to the pristine base path
            assert_equals(stdout, u"%s" % shell_vars['base_path'], stderr)
            assert_equals(stderr,'')
# @pytest.mark.slow
# def test_activate_keeps_PATH_order(shell):
# if not on_win or shell != "cmd.exe":
# pytest.xfail("test only implemented for cmd.exe on win")
# shell_vars = _format_vars(shell)
# with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
# commands = shell_vars['command_setup'] + dedent("""\
# @set "PATH=somepath;CONDA_PATH_PLACEHOLDER;%PATH%"
# @call "{syspath}{binpath}activate.bat"
# {path_print}
# """).format(
# envs=envs,
# env_dirs=gen_test_env_paths(envs, shell),
# **shell_vars)
# stdout, stderr = run_in(commands, shell)
# assert stdout.startswith("somepath;" + sys.prefix)
# @pytest.mark.slow
# def test_deactivate_placeholder(shell):
# if not on_win or shell != "cmd.exe":
# pytest.xfail("test only implemented for cmd.exe on win")
# shell_vars = _format_vars(shell)
# with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
# commands = shell_vars['command_setup'] + dedent("""\
# @set "PATH=flag;%PATH%"
# @call "{syspath}{binpath}activate.bat"
# @call "{syspath}{binpath}deactivate.bat" "hold"
# {path_print}
# """).format(
# envs=envs,
# env_dirs=gen_test_env_paths(envs, shell),
# **shell_vars)
# stdout, stderr = run_in(commands, shell)
# assert stdout.startswith("CONDA_PATH_PLACEHOLDER;flag")
# This test depends on files that are copied/linked in the conda recipe. It is unfortunately not going to run after
# a setup.py install step
# @pytest.mark.slow
# def test_activate_from_exec_folder(shell):
# """The exec folder contains only the activate and conda commands. It is for users
# who want to avoid conda packages shadowing system ones."""
# shell_vars = _format_vars(shell)
# with TemporaryDirectory(prefix='envs', dir=os.path.dirname(__file__)) as envs:
# env_dirs=gen_test_env_paths(envs, shell)
# commands = shell_vars['command_setup'] + dedent("""\
# {source} "{syspath}/exec/activate{suffix_executable}" "{env_dirs[0]}"
# {echo} {var}
# """).format(
# envs=envs,
# env_dirs=env_dirs,
# var=shell_vars["var_format"].format("TEST_VAR"),
# **shell_vars)
# stdout, stderr = run_in(commands, shell)
# assert_equals(stdout, u'test', stderr)
def run_in(command, shell, cwd=None, env=None, extra_args=""):
    """Run *command* inside *shell* and capture its output.

    Dispatches on the shell type:
      * cmd.exe / powershell.exe — write the commands to a temp script and
        invoke the shell on it;
      * cygwin/mingw/msys shells — write a batch/shell "trampoline" polyglot
        temp file that cmd.exe bootstraps into the unix shell;
      * plain unix shells — feed the commands through a heredoc.

    Returns a map of two unicode strings (stdout, stderr) with CRLF
    normalized to LF and trailing newlines stripped.
    NOTE(review): `extra_args` is ignored on the cmd.exe/powershell path.
    """
    # the pytest fixture may hand us the shell dict instead of its name
    if hasattr(shell, "keys"):
        shell = shell["exe"]
    if shell in ["cmd.exe","powershell.exe"]:
        # create temporary script with the commands to run, then execute script
        with tempfile.NamedTemporaryFile(suffix=shells[shell]["suffix_script"],
                                         mode='w+t',
                                         delete=False) as cmd_script:
            cmd_name=cmd_script.name
            cmd_script.write(command)
        # re-open after close so the content is flushed (needed on Windows)
        with open(cmd_name, "r") as f:
            print("cmd_bits: {{{}}}".format(f.read()))
        cmd_bits = dedent("""\
            {exe} {shell_args} {script}
            """).format(
                script=cmd_name,
                **shells[shell])
        try:
            p = subprocess.Popen(cmd_bits,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=cwd,
                                 env=env)
            streams = p.communicate()
        finally:
            # unlink temporary file such that it is garbage collected
            os.unlink(cmd_name)
    elif any(map(lambda ext: shell.endswith(ext), [".cygwin",".mingw",".msys"])):
        # batch/shell polyglot: cmd.exe executes the @-lines, while the unix
        # shell skips them via the no-op heredoc and runs {command} instead
        with tempfile.NamedTemporaryFile(suffix=shells["cmd.exe"]["suffix_script"],
                                         mode='w+b',
                                         delete=False) as cmd_script:
            cmd_name=cmd_script.name
            cmd_script.write(dedent("""\
                : <<TRAMPOLINE
                @CALL {exe} -c "exit 0" || (@ECHO Shell {exe} not found on PATH & @EXIT /b 1)
                @SET "PATH={pathprefix};%PATH%"
                @CALL {exe} {shell_args} {extra_args} "%~f0"
                @GOTO :EOF
                TRAMPOLINE
                #####################
                #!/usr/bin/env {shebang}
                {command}
                """).format(
                    command=command,
                    extra_args=extra_args,
                    # using .exe in shebang causes issues
                    shebang=re.sub(r'\.\w+$',r'',os.path.basename(shells[shell]["exe"])),
                    **shells[shell]).encode())
        with open(cmd_name, "r") as f:
            print("cmd_bits: {{{}}}".format(f.read()))
        # the trampoline file is launched through cmd.exe
        cmd_bits = dedent("""\
            {exe} {shell_args} {script}
            """).format(
                script=cmd_name,
                **shells["cmd.exe"])
        try:
            p = subprocess.Popen(cmd_bits,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=cwd,
                                 env=env)
            streams = p.communicate()
        finally:
            # unlink temporary file such that it is garbage collected
            os.unlink(cmd_name)
    else:
        # heredoc/hereword are the closest we can get to truly mimicking a
        # proper sourcing of the activate/deactivate scripts
        #
        # must use heredoc to avoid Ubuntu/dash incompatibility with hereword
        cmd_bits = dedent("""\
            {exe} {shell_args} {extra_args} <<- 'HEREDOC'
            {command}
            HEREDOC
            """).format(
                command=translate_stream(command, shells[shell]["path_to"]),
                extra_args=extra_args,
                **shells[shell])
        print("cmd_bits: {{{}}}".format(cmd_bits))
        p = subprocess.Popen(cmd_bits,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             cwd=cwd,
                             env=env)
        streams = p.communicate()
    # decode, normalize line endings, strip trailing newlines
    return map(lambda s: u"{}".format(s.decode('utf-8').replace('\r\n', '\n').rstrip("\n")), streams)
| 39.397398
| 118
| 0.530662
| 10,991
| 105,979
| 4.93158
| 0.050405
| 0.045662
| 0.033873
| 0.027508
| 0.880154
| 0.869528
| 0.859454
| 0.853403
| 0.846005
| 0.833625
| 0
| 0.003964
| 0.31677
| 105,979
| 2,689
| 119
| 39.412049
| 0.744614
| 0.206381
| 0
| 0.879373
| 0
| 0.001567
| 0.263624
| 0.045164
| 0
| 0
| 0
| 0
| 0.061619
| 1
| 0.018799
| false
| 0
| 0.007833
| 0.000522
| 0.030287
| 0.12376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5ad16eb1e631ea7f5260aa5b52bc2870922a2c17
| 14,408
|
py
|
Python
|
pywick/models/segmentation/testnets/exfuse/UnetExFuse.py
|
achaiah/pywick
|
9d663faf0c1660a9b8359a6472c164f658dfc8cb
|
[
"MIT"
] | 408
|
2019-05-16T16:12:41.000Z
|
2022-03-26T17:27:12.000Z
|
pywick/models/segmentation/testnets/exfuse/UnetExFuse.py
|
ashishpatel26/pywick
|
1afffd1c21c2b188836d3599e802146182757bb5
|
[
"MIT"
] | 13
|
2019-05-17T05:47:06.000Z
|
2021-06-21T19:02:30.000Z
|
pywick/models/segmentation/testnets/exfuse/UnetExFuse.py
|
ashishpatel26/pywick
|
1afffd1c21c2b188836d3599e802146182757bb5
|
[
"MIT"
] | 42
|
2019-05-16T19:57:12.000Z
|
2022-03-06T15:23:18.000Z
|
# Source: https://github.com/rplab-snu/nucleus_segmentation
"""
Implementation of `ExFuse: Enhancing Feature Fusion for SemanticSegmentation <https://arxiv.org/abs/1804.03821>`_
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .unet_layer import UnetConv2D, UnetUpConv2D, weights_init_kaiming, ConvBNReLU
from .ExFuseLayer import SEB, GCN, ECRE, DAP, UnetExFuseLevel
__all__ = ['UnetExFuse', 'UnetGCN', 'UnetGCNECRE', 'UnetGCNECRE_v2', 'UnetGCNSEB']
class UnetGCN(nn.Module):
    """U-Net variant that inserts a Global Convolutional Network (GCN) block
    after every encoder stage (part of the ExFuse family, arXiv:1804.03821).

    Args:
        feature_scale: divisor applied to the base channel widths [64..1024].
        n_classes: number of output channels of the final 1x1 conv.
        is_deconv: forwarded to UnetUpConv2D (transposed conv vs. upsampling).
        norm: normalization layer class used inside the conv blocks.
        is_pool: if True downsample with MaxPool2d, else with a strided ConvBNReLU.
    """

    def __init__(self, feature_scale=4, n_classes=1, is_deconv=True, norm=nn.InstanceNorm2d, is_pool=True):
        super(UnetGCN, self).__init__()
        filters = [64, 128, 256, 512, 1024]
        filters = [x // feature_scale for x in filters]
        # downsampling
        # NOTE(review): the first conv takes 1 input channel — assumes
        # single-channel (grayscale) input; confirm against callers.
        self.conv1 = UnetConv2D(1, filters[0], norm)
        self.gcn1 = GCN(filters[0], filters[0])
        self.maxpool1 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[0], filters[0], norm, stride=2)
        self.conv2 = UnetConv2D(filters[0], filters[1], norm)
        self.gcn2 = GCN(filters[1], filters[1])
        self.maxpool2 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[1], filters[1], norm, stride=2)
        self.conv3 = UnetConv2D(filters[1], filters[2], norm)
        self.gcn3 = GCN(filters[2], filters[2])
        self.maxpool3 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[2], filters[2], norm, stride=2)
        self.conv4 = UnetConv2D(filters[2], filters[3], norm)
        self.gcn4 = GCN(filters[3], filters[3])
        self.maxpool4 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[3], filters[3], norm, stride=2)
        self.center = UnetConv2D(filters[3], filters[4], norm)
        # upsampling
        self.up_concat4 = UnetUpConv2D(filters[4], filters[3], norm, is_deconv)
        self.up_concat3 = UnetUpConv2D(filters[3], filters[2], norm, is_deconv)
        self.up_concat2 = UnetUpConv2D(filters[2], filters[1], norm, is_deconv)
        self.up_concat1 = UnetUpConv2D(filters[1], filters[0], norm, is_deconv)
        # final conv (without any concat)
        self.final = nn.Conv2d(filters[0], n_classes, 1)
        # initialise weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.apply(weights_init_kaiming)
            elif isinstance(m, nn.BatchNorm2d):
                m.apply(weights_init_kaiming)

    def forward(self, inputs):
        # encoder: conv -> GCN refinement -> downsample, four times
        conv1 = self.conv1(inputs)
        conv1 = self.gcn1(conv1)
        maxpool1 = self.maxpool1(conv1)
        conv2 = self.conv2(maxpool1)
        conv2 = self.gcn2(conv2)
        maxpool2 = self.maxpool2(conv2)
        conv3 = self.conv3(maxpool2)
        conv3 = self.gcn3(conv3)
        maxpool3 = self.maxpool3(conv3)
        conv4 = self.conv4(maxpool3)
        conv4 = self.gcn4(conv4)
        maxpool4 = self.maxpool4(conv4)
        center = self.center(maxpool4)
        # decoder: upsample and fuse with the GCN-refined skip features
        up4 = self.up_concat4(conv4, center)
        up3 = self.up_concat3(conv3, up4)
        up2 = self.up_concat2(conv2, up3)
        up1 = self.up_concat1(conv1, up2)
        final = self.final(up1)
        return final
class UnetGCNSEB(nn.Module):
    """U-Net variant with GCN blocks after each encoder stage and SEB
    (Semantic Embedding Branch) modules replacing the decoder up-convs
    (ExFuse, arXiv:1804.03821).

    Args:
        feature_scale: divisor applied to the base channel widths [64..1024].
        n_classes: number of output channels of the final 1x1 conv.
        is_deconv: accepted for API parity with the sibling models; the SEB
            decoder blocks do not use it.
        norm: normalization layer class used inside the conv blocks.
        is_pool: if True downsample with MaxPool2d, else with a strided ConvBNReLU.
    """

    def __init__(self, feature_scale=4, n_classes=1, is_deconv=True, norm=nn.InstanceNorm2d, is_pool=True):
        super(UnetGCNSEB, self).__init__()
        filters = [64, 128, 256, 512, 1024]
        filters = [x // feature_scale for x in filters]
        # downsampling (input is assumed single-channel)
        self.conv1 = UnetConv2D(1, filters[0], norm)
        self.gcn1 = GCN(filters[0], filters[0])
        self.maxpool1 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[0], filters[0], norm, stride=2)
        self.conv2 = UnetConv2D(filters[0], filters[1], norm)
        self.gcn2 = GCN(filters[1], filters[1])
        self.maxpool2 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[1], filters[1], norm, stride=2)
        self.conv3 = UnetConv2D(filters[1], filters[2], norm)
        self.gcn3 = GCN(filters[2], filters[2])
        self.maxpool3 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[2], filters[2], norm, stride=2)
        self.conv4 = UnetConv2D(filters[2], filters[3], norm)
        self.gcn4 = GCN(filters[3], filters[3])
        self.maxpool4 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[3], filters[3], norm, stride=2)
        self.center = UnetConv2D(filters[3], filters[4], norm)
        # upsampling via SEB fusion blocks
        self.up_concat4 = SEB(filters[4], filters[3])
        self.up_concat3 = SEB(filters[3], filters[2])
        self.up_concat2 = SEB(filters[2], filters[1])
        self.up_concat1 = SEB(filters[1], filters[0])
        # final conv (without any concat)
        # Bug fix: the output width was hard-coded to 1, silently ignoring
        # n_classes; use n_classes as UnetGCN does (default of 1 preserves
        # the previous behavior for existing callers).
        self.final = nn.Conv2d(filters[0], n_classes, 1)
        # initialise weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.apply(weights_init_kaiming)
            elif isinstance(m, nn.BatchNorm2d):
                m.apply(weights_init_kaiming)

    def forward(self, inputs):
        # encoder: conv -> GCN refinement -> downsample, four times
        conv1 = self.conv1(inputs)
        conv1 = self.gcn1(conv1)
        maxpool1 = self.maxpool1(conv1)
        conv2 = self.conv2(maxpool1)
        conv2 = self.gcn2(conv2)
        maxpool2 = self.maxpool2(conv2)
        conv3 = self.conv3(maxpool2)
        conv3 = self.gcn3(conv3)
        maxpool3 = self.maxpool3(conv3)
        conv4 = self.conv4(maxpool3)
        conv4 = self.gcn4(conv4)
        maxpool4 = self.maxpool4(conv4)
        center = self.center(maxpool4)
        # decoder: SEB-fuse each skip connection with the coarser features
        up4 = self.up_concat4(conv4, center)
        up3 = self.up_concat3(conv3, up4)
        up2 = self.up_concat2(conv2, up3)
        up1 = self.up_concat1(conv1, up2)
        final = self.final(up1)
        return final
class UnetUpECRE(nn.Module):
    """Decoder block that upsamples with ECRE (Explicit Channel Resolution
    Embedding) and concatenates the skip connection before a double conv.

    Unlike UnetUpConv2D, ``forward`` returns a pair (fused features,
    upsampled input) so callers can attach an auxiliary loss to the ECRE
    output.
    """

    def __init__(self, in_size, out_size, norm, is_deconv=False):
        # is_deconv is accepted for signature parity but unused here
        super(UnetUpECRE, self).__init__()
        self.conv = UnetConv2D(in_size + out_size, out_size, norm)
        self.up = ECRE(in_size)
        # initialise the blocks
        for m in self.children():
            # UnetConv2D initialises its own weights; skip re-initialising it
            if m.__class__.__name__.find('UnetConv2D') != -1:
                continue
            m.apply(weights_init_kaiming)

    def forward(self, input1, input2):
        # input1: skip features from the encoder; input2: coarser decoder features
        output2 = self.up(input2)
        # pad the skip tensor symmetrically so both spatial sizes match
        # (offset assumed even — TODO confirm for odd input sizes)
        offset = output2.size()[2] - input1.size()[2]
        padding = [offset // 2] * 4
        output1 = F.pad(input1, padding)
        output = torch.cat([output1, output2], 1)
        return self.conv(output), output2
class UnetGCNECRE(nn.Module):
    """U-Net with GCN-refined encoder stages and an ECRE-upsampling decoder
    (ExFuse, arXiv:1804.03821).

    Args:
        feature_scale: divisor applied to the base channel widths [64..1024].
        n_classes: number of output channels of the final 1x1 conv.
        is_deconv: accepted for API parity with the sibling models; the ECRE
            decoder blocks do not use it.
        norm: normalization layer class used inside the conv blocks.
        is_pool: if True downsample with MaxPool2d, else with a strided ConvBNReLU.
    """

    def __init__(self, feature_scale=4, n_classes=1, is_deconv=True, norm=nn.InstanceNorm2d, is_pool=True):
        super(UnetGCNECRE, self).__init__()
        filters = [64, 128, 256, 512, 1024]
        filters = [x // feature_scale for x in filters]
        # downsampling (input is assumed single-channel)
        self.conv1 = UnetConv2D(1, filters[0], norm)
        self.gcn1 = GCN(filters[0], filters[0])
        self.maxpool1 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[0], filters[0], norm, stride=2)
        self.conv2 = UnetConv2D(filters[0], filters[1], norm)
        self.gcn2 = GCN(filters[1], filters[1])
        self.maxpool2 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[1], filters[1], norm, stride=2)
        self.conv3 = UnetConv2D(filters[1], filters[2], norm)
        self.gcn3 = GCN(filters[2], filters[2])
        self.maxpool3 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[2], filters[2], norm, stride=2)
        self.conv4 = UnetConv2D(filters[2], filters[3], norm)
        self.gcn4 = GCN(filters[3], filters[3])
        self.maxpool4 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[3], filters[3], norm, stride=2)
        self.center = UnetConv2D(filters[3], filters[4], norm)
        # upsampling: each level returns (features, ecre-output)
        self.up_concat4 = UnetUpECRE(filters[4], filters[3], norm)
        self.up_concat3 = UnetUpECRE(filters[3], filters[2], norm)
        self.up_concat2 = UnetUpECRE(filters[2], filters[1], norm)
        self.up_concat1 = UnetUpECRE(filters[1], filters[0], norm)
        # final conv (without any concat)
        self.final = nn.Conv2d(filters[0], n_classes, 1)
        # For aux loss
        # NOTE(review): these heads are defined but unused while the auxiliary
        # returns in forward() stay commented out.
        self.ecre4 = ConvBNReLU(filters[4], 1, norm, stride=2)
        self.ecre3 = ConvBNReLU(filters[3], 1, norm, stride=2)
        self.ecre2 = ConvBNReLU(filters[2], 1, norm, stride=2)
        self.ecre1 = ConvBNReLU(filters[1], 1, norm, stride=2)
        # initialise weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.apply(weights_init_kaiming)
            elif isinstance(m, nn.BatchNorm2d):
                m.apply(weights_init_kaiming)

    def forward(self, inputs):
        # encoder: conv -> GCN refinement -> downsample, four times
        conv1 = self.conv1(inputs)
        conv1 = self.gcn1(conv1)
        maxpool1 = self.maxpool1(conv1)
        conv2 = self.conv2(maxpool1)
        conv2 = self.gcn2(conv2)
        maxpool2 = self.maxpool2(conv2)
        conv3 = self.conv3(maxpool2)
        conv3 = self.gcn3(conv3)
        maxpool3 = self.maxpool3(conv3)
        conv4 = self.conv4(maxpool3)
        conv4 = self.gcn4(conv4)
        maxpool4 = self.maxpool4(conv4)
        center = self.center(maxpool4)
        # decoder: unpack (features, ecre) at each level
        up4, ecre4 = self.up_concat4(conv4, center)
        up3, ecre3 = self.up_concat3(conv3, up4)
        up2, ecre2 = self.up_concat2(conv2, up3)
        up1, ecre1 = self.up_concat1(conv1, up2)
        final = self.final(up1)
        return final  # , self.ecre4(ecre4), self.ecre3(ecre3), self.ecre2(ecre2), self.ecre1(ecre1)
class UnetGCNECRE_v2(UnetGCNECRE):
    """UnetGCNECRE variant where the GCN modules are applied only on the
    skip path: the raw encoder features are pooled, while the GCN-refined
    features feed the decoder fusion blocks.
    """

    def __init__(self, feature_scale=4, n_classes=1, is_deconv=True, norm=nn.InstanceNorm2d, is_pool=True):
        super(UnetGCNECRE_v2, self).__init__(feature_scale=feature_scale, n_classes=n_classes, is_deconv=is_deconv, norm=norm, is_pool=is_pool)

    def forward(self, inputs):
        # encoder: GCN output kept aside for the skip connection; the raw
        # conv output is the one that gets pooled
        conv1 = self.conv1(inputs)
        gcn1 = self.gcn1(conv1)
        maxpool1 = self.maxpool1(conv1)
        conv2 = self.conv2(maxpool1)
        gcn2 = self.gcn2(conv2)
        maxpool2 = self.maxpool2(conv2)
        conv3 = self.conv3(maxpool2)
        gcn3 = self.gcn3(conv3)
        maxpool3 = self.maxpool3(conv3)
        conv4 = self.conv4(maxpool3)
        gcn4 = self.gcn4(conv4)
        maxpool4 = self.maxpool4(conv4)
        center = self.center(maxpool4)
        # Bug fix: the inherited up_concat* blocks are UnetUpECRE, whose
        # forward returns a (features, ecre) tuple. The original code passed
        # the whole tuple to the next level, which fails at runtime with a
        # TypeError. Unpack and discard the ECRE output (this variant has no
        # auxiliary loss).
        up4, _ = self.up_concat4(gcn4, center)
        up3, _ = self.up_concat3(gcn3, up4)
        up2, _ = self.up_concat2(gcn2, up3)
        up1, _ = self.up_concat1(gcn1, up2)
        final = self.final(up1)
        return final
class UnetGCNECRE_v3(UnetGCNECRE):
    """UnetGCNECRE variant intended for the ExFuse trainer with auxiliary
    supervision loss.

    TODO(review): the auxiliary outputs are not actually returned yet — the
    forward pass is currently identical to UnetGCNECRE_v2. Wire up the
    ``self.ecre*`` heads before using this with an aux-loss trainer.
    """

    def __init__(self, feature_scale=4, n_classes=1,
                 is_deconv=True, norm=nn.InstanceNorm2d, is_pool=True):
        super(UnetGCNECRE_v3, self).__init__(feature_scale=feature_scale, n_classes=n_classes,
                                             is_deconv=is_deconv, norm=norm, is_pool=is_pool)

    def forward(self, inputs):
        # encoder: GCN output kept aside for the skip connection; the raw
        # conv output is the one that gets pooled
        conv1 = self.conv1(inputs)
        gcn1 = self.gcn1(conv1)
        maxpool1 = self.maxpool1(conv1)
        conv2 = self.conv2(maxpool1)
        gcn2 = self.gcn2(conv2)
        maxpool2 = self.maxpool2(conv2)
        conv3 = self.conv3(maxpool2)
        gcn3 = self.gcn3(conv3)
        maxpool3 = self.maxpool3(conv3)
        conv4 = self.conv4(maxpool3)
        gcn4 = self.gcn4(conv4)
        maxpool4 = self.maxpool4(conv4)
        center = self.center(maxpool4)
        # Bug fix: the inherited up_concat* blocks are UnetUpECRE, whose
        # forward returns a (features, ecre) tuple. The original code passed
        # the whole tuple to the next level, which fails at runtime with a
        # TypeError. Unpack and keep only the feature map.
        up4, _ = self.up_concat4(gcn4, center)
        up3, _ = self.up_concat3(gcn3, up4)
        up2, _ = self.up_concat2(gcn2, up3)
        up1, _ = self.up_concat1(gcn1, up2)
        final = self.final(up1)
        return final
class UnetExFuse(nn.Module):
    """Full ExFuse segmentation network: GCN-refined U-Net encoder, one
    standard up-conv at the deepest level, then three UnetExFuseLevel fusion
    stages and a DAP (Dense Adjacent Prediction) head (arXiv:1804.03821).

    Args:
        num_classes: number of output channels of the final 1x1 conv.
        pretrained: accepted for API parity with other pywick models;
            not used here (no pretrained weights are loaded).
        feature_scale: divisor applied to the base channel widths [64..1024].
        is_deconv: forwarded to UnetUpConv2D (transposed conv vs. upsampling).
        norm: normalization layer class used inside the conv blocks.
        is_pool: if True downsample with MaxPool2d, else with a strided ConvBNReLU.
    """

    def __init__(self, num_classes=1, pretrained=False, feature_scale=4, is_deconv=True, norm=nn.InstanceNorm2d, is_pool=True, **kwargs):
        super(UnetExFuse, self).__init__()
        filters = [64, 128, 256, 512, 1024]
        filters = [x // feature_scale for x in filters]
        # downsampling (input is assumed single-channel)
        self.conv1 = UnetConv2D(1, filters[0], norm)
        self.gcn1 = GCN(filters[0], filters[0])
        self.maxpool1 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[0], filters[0], norm, stride=2)
        self.conv2 = UnetConv2D(filters[0], filters[1], norm)
        self.gcn2 = GCN(filters[1], filters[1])
        self.maxpool2 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[1], filters[1], norm, stride=2)
        self.conv3 = UnetConv2D(filters[1], filters[2], norm)
        self.gcn3 = GCN(filters[2], filters[2])
        self.maxpool3 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[2], filters[2], norm, stride=2)
        self.conv4 = UnetConv2D(filters[2], filters[3], norm)
        self.gcn4 = GCN(filters[3], filters[3])
        self.maxpool4 = nn.MaxPool2d(kernel_size=2) if is_pool else ConvBNReLU(filters[3], filters[3], norm, stride=2)
        self.center = UnetConv2D(filters[3], filters[4], norm)
        # upsampling
        self.up_concat4 = UnetUpConv2D(filters[4], filters[3], norm, is_deconv)
        self.level4 = UnetExFuseLevel(filters[3], filters[2])
        self.level3 = UnetExFuseLevel(filters[2], filters[1])
        self.level2 = UnetExFuseLevel(filters[1], filters[0])
        # Bug fix: the final conv hard-coded 1 output channel, silently
        # ignoring num_classes; use num_classes (default of 1 preserves the
        # previous behavior for existing callers).
        self.final = nn.Sequential(DAP(filters[0]), nn.Conv2d(filters[0], num_classes, 1))
        # initialise weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.apply(weights_init_kaiming)
            elif isinstance(m, nn.BatchNorm2d):
                m.apply(weights_init_kaiming)

    def forward(self, inputs):
        # encoder: conv -> GCN refinement -> downsample, four times
        conv1 = self.conv1(inputs)
        conv1 = self.gcn1(conv1)
        maxpool1 = self.maxpool1(conv1)
        conv2 = self.conv2(maxpool1)
        conv2 = self.gcn2(conv2)
        maxpool2 = self.maxpool2(conv2)
        conv3 = self.conv3(maxpool2)
        conv3 = self.gcn3(conv3)
        maxpool3 = self.maxpool3(conv3)
        conv4 = self.conv4(maxpool3)
        conv4 = self.gcn4(conv4)
        maxpool4 = self.maxpool4(conv4)
        center = self.center(maxpool4)
        # decoder: one up-conv at the deepest level, then ExFuse fusion levels
        up4 = self.up_concat4(conv4, center)
        l4 = self.level4(conv4, center, up4)
        l3 = self.level3(conv3, conv4, l4)
        l2 = self.level2(conv2, conv3, l3)
        final = self.final(l2)
        return final
| 37.326425
| 143
| 0.626874
| 1,873
| 14,408
| 4.704752
| 0.085958
| 0.024512
| 0.024966
| 0.032342
| 0.820132
| 0.782002
| 0.773491
| 0.773491
| 0.769178
| 0.764412
| 0
| 0.069306
| 0.247918
| 14,408
| 385
| 144
| 37.423377
| 0.743909
| 0.043726
| 0
| 0.755639
| 0
| 0
| 0.004509
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.018797
| 0
| 0.12406
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51779552d615897f982972f390e93ad2ccd398c2
| 57,371
|
py
|
Python
|
lib/minkowski.py
|
heyfaraday/CMB_test
|
ff4c63bd5797dec02c23338c67e761ef62c87338
|
[
"MIT"
] | null | null | null |
lib/minkowski.py
|
heyfaraday/CMB_test
|
ff4c63bd5797dec02c23338c67e761ef62c87338
|
[
"MIT"
] | null | null | null |
lib/minkowski.py
|
heyfaraday/CMB_test
|
ff4c63bd5797dec02c23338c67e761ef62c87338
|
[
"MIT"
] | 1
|
2022-02-13T04:24:42.000Z
|
2022-02-13T04:24:42.000Z
|
def area(y, field, level=0.0):
    """Return the normalized area (first Minkowski functional) of the
    excursion set ``{field >= level}`` on a latitude/longitude grid.

    Each grid cell is weighted by ``cos`` of its latitude ``y[i][j]`` so the
    result is the spherical-area fraction of cells whose 4-node mean exceeds
    ``level``.

    Args:
        y: 2-D array of latitudes (radians), same shape as ``field``.
        field: 2-D array (must expose ``.shape``, e.g. a numpy array).
        level: excursion threshold.

    Returns:
        Weighted area fraction in [0, 1].
    """
    from math import cos
    a = 0.0   # weighted area of cells above the threshold
    na = 0.0  # total weighted area (normalization)
    n = field.shape[0] - 1
    # Fix: the original used Python-2-only xrange and true division for the
    # loop bound; range and floor division give identical bounds and also
    # work under Python 3.
    for i in range(0, n):
        for j in range(1, n // 2):
            # mean of the four cell corners
            mean = (field[i][j] + field[i + 1][j + 1] + field[i + 1][j] + field[i][j + 1]) / 4.0
            if mean >= level:
                a += cos(y[i][j])
            na += cos(y[i][j])
    return a / na
def length(x, y, field, level=0.0):
    """Return the normalized contour length (second Minkowski functional) of
    the level set ``{field == level}`` on a latitude/longitude grid.

    Marching-squares style: for each grid cell, detect sign changes of
    ``f = field - level`` along the cell edges, linearly interpolate the two
    crossing points, and add the great-circle distance between them (``s2``).
    The total is normalized by 4*pi (full-sphere solid angle).

    NOTE(review): Python-2-only code (``xrange``, integer ``/`` in index
    arithmetic); porting requires ``range`` and ``//``.
    """
    from math import fabs, pi
    from distance import s2
    n = field.shape[0]-1
    l = 0.0
    f = field - level
    for i in xrange(0, n):
        for j in xrange(1, n / 2):
            # local grid spacing in latitude/longitude — taken from fixed
            # mid-grid nodes, so a uniform grid is assumed (TODO confirm)
            h_theta = fabs(y[n / 2 + 1][n / 4 + 1])
            h_phi = fabs(x[n / 4][j] - x[n / 4 + 1][j])
            # degenerate case: contour passes exactly through grid nodes —
            # contributes nothing
            if f[i][j] == 0.0 and (f[i][j + 1] == 0.0 or f[i + 1][j] == 0.0):
                l += 0.0
            # crossing on the (i, j)-(i, j+1) edge
            if f[i][j] * f[i][j + 1] < 0.0:
                # paired with a crossing on the (i, j)-(i+1, j) edge
                if f[i][j] * f[i + 1][j] < 0.0:
                    phi1 = x[i][j]
                    theta1 = y[i][j] + h_theta * fabs(f[i][j]) / (fabs(f[i][j]) + fabs(f[i][j + 1]))
                    phi2 = x[i][j] + h_phi * fabs(f[i][j]) / (fabs(f[i][j]) + fabs(f[i + 1][j]))
                    theta2 = y[i][j]
                    l += s2(phi1, phi2, theta1, theta2)
                # paired with a crossing on the (i+1, j)-(i+1, j+1) edge
                elif f[i + 1][j] * f[i + 1][j + 1] < 0.0:
                    phi1 = x[i][j]
                    theta1 = y[i][j] + h_theta * fabs(f[i][j]) / (fabs(f[i][j]) + fabs(f[i][j + 1]))
                    phi2 = x[i + 1][j]
                    theta2 = y[i + 1][j] + h_theta * fabs(f[i + 1][j]) / (fabs(f[i + 1][j]) + fabs(f[i + 1][j + 1]))
                    l += s2(phi1, phi2, theta1, theta2)
                # paired with a crossing on the (i, j+1)-(i+1, j+1) edge
                elif f[i][j + 1] * f[i + 1][j + 1] < 0.0:
                    phi1 = x[i][j]
                    theta1 = y[i][j] + h_theta * fabs(f[i][j]) / (fabs(f[i][j]) + fabs(f[i][j + 1]))
                    phi2 = x[i][j + 1] + h_phi * fabs(f[i][j + 1]) / (fabs(f[i][j + 1]) + fabs(f[i + 1][j + 1]))
                    theta2 = y[i][j + 1]
                    l += s2(phi1, phi2, theta1, theta2)
            # crossing on the (i, j)-(i+1, j) edge only
            elif f[i][j] * f[i + 1][j] <= 0.0:
                if f[i + 1][j] * f[i + 1][j + 1] < 0.0:
                    phi1 = x[i][j] + h_phi * fabs(f[i][j]) / (fabs(f[i][j]) + fabs(f[i + 1][j]))
                    theta1 = y[i][j]
                    phi2 = x[i + 1][j]
                    theta2 = y[i + 1][j] + h_theta * fabs(f[i + 1][j]) / (fabs(f[i + 1][j]) + fabs(f[i + 1][j + 1]))
                    l += s2(phi1, phi2, theta1, theta2)
                elif f[i][j + 1] * f[i + 1][j + 1] < 0.0:
                    phi1 = x[i][j] + h_phi * fabs(f[i][j]) / (fabs(f[i][j]) + fabs(f[i + 1][j]))
                    theta1 = y[i][j]
                    phi2 = x[i][j + 1] + h_phi * fabs(f[i][j + 1]) / (fabs(f[i][j + 1]) + fabs(f[i + 1][j + 1]))
                    theta2 = y[i][j + 1]
                    l += s2(phi1, phi2, theta1, theta2)
            # crossing on the (i+1, j)-(i+1, j+1) edge only
            elif f[i + 1][j] * f[i + 1][j + 1] < 0.0:
                if f[i][j + 1] * f[i + 1][j + 1] < 0.0:
                    phi1 = x[i + 1][j]
                    theta1 = y[i + 1][j] + h_theta * fabs(f[i + 1][j]) / (fabs(f[i + 1][j]) + fabs(f[i + 1][j + 1]))
                    phi2 = x[i][j + 1] + h_phi * fabs(f[i][j + 1]) / (fabs(f[i][j + 1]) + fabs(f[i + 1][j + 1]))
                    theta2 = y[i][j + 1]
                    l += s2(phi1, phi2, theta1, theta2)
    # normalize by the full-sphere solid angle
    return l / (4 * pi)
def condition_1(xx, yy, xy):
    """Classify a critical point by its Hessian entries.

    Returns 0 for a minimum-like point (positive-semidefinite case), 2 for a
    maximum-like point, and 1 for a saddle (negative determinant).
    """
    det = xx * yy - xy * xy
    if det >= 0.0 and (xx >= 0.0 or yy > 0.0):
        return 0
    if det >= 0.0 and (xx <= 0.0 or yy <= 0.0):
        return 2
    return 1
def condition_2(qx, qy, ux, uy):
    """Classify a point of a Q/U polarization field via the root structure of
    the associated cubic.

    Returns 0 when ``qx*uy - qy*ux`` is negative, 1 when all three roots of
    ``uy*t^3 + (ux + 2*qy)*t^2 + (2*qx - uy)*t - ux`` are numerically real
    (imaginary part below 1e-5), and 2 otherwise.
    """
    from numpy import roots, imag
    tolerance = 1e-5
    r1, r2, r3 = roots([uy, ux + 2 * qy, 2 * qx - uy, -ux])
    if (qx * uy - qy * ux) < 0:
        return 0
    if max(abs(imag(r1)), abs(imag(r2)), abs(imag(r3))) < tolerance:
        return 1
    return 2
def type_points(x, y, f, fx, fy, fxx, fyy, fxy, sigma_0, sigma_1, sigma_2, my_file=False, my_cmbmap=False,
                up_bounds=False, down_bounds=False, whitelist=False, whitelist_flag=False):
    """Find and classify critical points of the field f on a (phi, theta) grid.

    In each grid cell the zero-crossing segments of fx and fy are linearly
    interpolated from sign changes at the four cell corners; their
    intersection (lib.distance.cross) is the candidate critical point,
    which is classified via condition_1 on the interpolated second
    derivatives (0 -> minimum, 2 -> maximum, 1 -> saddle).

    x, y            -- node coordinates, indexed [i][j]
    f               -- field values on the nodes (n+1 rows)
    fx, fy          -- first derivatives on the nodes
    fxx, fyy, fxy   -- second derivatives on the nodes
    sigma_0..2      -- moments, echoed into my_file with each point
    my_file         -- open file object, or False to skip text output
    my_cmbmap       -- map object for lib.cmbplot.point, or False to skip plotting
    up_bounds, down_bounds -- when not False, only classify points whose f
                       value lies between them (either ordering), and return
                       the counters at the end
    whitelist, whitelist_flag -- when whitelist_flag is set, only cells with
                       whitelist[i][j] == 0 are examined

    Returns (g, n_max, n_min, n_sad) when bounds are given, otherwise None
    (results are emitted only through my_file / my_cmbmap).
    """
    from numpy import zeros
    from math import fabs, pi
    from lib.distance import s2, cross
    n = f.shape[0] - 1
    # g is a genus-like running total: +1 per extremum, -1 per saddle.
    g = 0.0
    n_min = 0.0
    n_max = 0.0
    n_sad = 0.0
    # z_x / z_y flag cells crossed by the fx = 0 / fy = 0 lines.
    z_x = zeros((n + 1, n / 2 + 1))
    z_y = zeros((n + 1, n / 2 + 1))
    # Endpoints of the interpolated fx = 0 segment (1a-1b) and fy = 0
    # segment (2a-2b) inside the current cell.
    phi1a = 0.0
    phi1b = 0.0
    phi2a = 0.0
    phi2b = 0.0
    theta1a = 0.0
    theta1b = 0.0
    theta2a = 0.0
    theta2b = 0.0
    for i in xrange(0, n):
        for j in xrange(1, n / 2):
            # Skip cells excluded by the whitelist (when one is in use).
            if ((whitelist_flag != False) and (whitelist[i][j] == 0)) or (whitelist_flag == False):
                # Grid steps taken from fixed reference nodes of the grid.
                h_theta = fabs(y[n / 2 + 1][n / 4 + 1])
                h_phi = fabs(x[n / 4][j] - x[n / 4 + 1][j])
                # --- fx = 0 crossing: pick the two cell edges where fx
                # changes sign and linearly interpolate the endpoints.
                if fx[i][j] * fx[i][j + 1] < 0.0:
                    if fx[i][j] * fx[i + 1][j] < 0.0:
                        phi1a = x[i][j]
                        theta1a = y[i][j] + h_theta * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i][j + 1]))
                        phi1b = x[i][j] + h_phi * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i + 1][j]))
                        theta1b = y[i][j]
                        z_x[i][j] = 1
                    elif fx[i + 1][j] * fx[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j]
                        theta1a = y[i][j] + h_theta * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i][j + 1]))
                        phi1b = x[i + 1][j]
                        theta1b = y[i + 1][j] + h_theta * fabs(fx[i + 1][j]) / (fabs(fx[i + 1][j])
                                                                               + fabs(fx[i + 1][j + 1]))
                        z_x[i][j] = 1
                    elif fx[i][j + 1] * fx[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j]
                        theta1a = y[i][j] + h_theta * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i][j + 1]))
                        phi1b = x[i][j + 1] + h_phi * fabs(fx[i][j + 1]) / (fabs(fx[i][j + 1]) + fabs(fx[i + 1][j + 1]))
                        theta1b = y[i][j + 1]
                        z_x[i][j] = 1
                elif fx[i][j] * fx[i + 1][j] < 0.0:
                    if fx[i + 1][j] * fx[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j] + h_phi * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i + 1][j]))
                        theta1a = y[i][j]
                        phi1b = x[i + 1][j]
                        theta1b = y[i + 1][j] + h_theta * fabs(fx[i + 1][j]) / (
                            fabs(fx[i + 1][j]) + fabs(fx[i + 1][j + 1]))
                        z_x[i][j] = 1
                    elif fx[i][j + 1] * fx[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j] + h_phi * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i + 1][j]))
                        theta1a = y[i][j]
                        phi1b = x[i][j + 1] + h_phi * fabs(fx[i][j + 1]) / (fabs(fx[i][j + 1]) + fabs(fx[i + 1][j + 1]))
                        theta1b = y[i][j + 1]
                        z_x[i][j] = 1
                elif fx[i + 1][j] * fx[i + 1][j + 1] < 0.0:
                    if fx[i][j + 1] * fx[i + 1][j + 1] < 0.0:
                        phi1a = x[i + 1][j]
                        theta1a = y[i + 1][j] + h_theta * fabs(fx[i + 1][j]) / (
                            fabs(fx[i + 1][j]) + fabs(fx[i + 1][j + 1]))
                        phi1b = x[i][j + 1] + h_phi * fabs(fx[i][j + 1]) / (fabs(fx[i][j + 1]) + fabs(fx[i + 1][j + 1]))
                        theta1b = y[i][j + 1]
                        z_x[i][j] = 1
                # --- fy = 0 crossing: same edge search for fy.
                if fy[i][j] * fy[i][j + 1] < 0.0:
                    if fy[i][j] * fy[i + 1][j] < 0.0:
                        phi2a = x[i][j]
                        theta2a = y[i][j] + h_theta * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i][j + 1]))
                        phi2b = x[i][j] + h_phi * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i + 1][j]))
                        theta2b = y[i][j]
                        z_y[i][j] = 1
                    elif fy[i + 1][j] * fy[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j]
                        theta2a = y[i][j] + h_theta * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i][j + 1]))
                        phi2b = x[i + 1][j]
                        theta2b = y[i + 1][j] + h_theta * fabs(fy[i + 1][j]) / (
                            fabs(fy[i + 1][j]) + fabs(fy[i + 1][j + 1]))
                        z_y[i][j] = 1
                    elif fy[i][j + 1] * fy[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j]
                        theta2a = y[i][j] + h_theta * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i][j + 1]))
                        phi2b = x[i][j + 1] + h_phi * fabs(fy[i][j + 1]) / (fabs(fy[i][j + 1]) + fabs(fy[i + 1][j + 1]))
                        theta2b = y[i][j + 1]
                        z_y[i][j] = 1
                elif fy[i][j] * fy[i + 1][j] < 0.0:
                    if fy[i + 1][j] * fy[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j] + h_phi * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i + 1][j]))
                        theta2a = y[i][j]
                        phi2b = x[i + 1][j]
                        theta2b = y[i + 1][j] + h_theta * fabs(fy[i + 1][j]) / (
                            fabs(fy[i + 1][j]) + fabs(fy[i + 1][j + 1]))
                        z_y[i][j] = 1
                    elif fy[i][j + 1] * fy[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j] + h_phi * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i + 1][j]))
                        theta2a = y[i][j]
                        phi2b = x[i][j + 1] + h_phi * fabs(fy[i][j + 1]) / (fabs(fy[i][j + 1]) + fabs(fy[i + 1][j + 1]))
                        theta2b = y[i][j + 1]
                        z_y[i][j] = 1
                elif fy[i + 1][j] * fy[i + 1][j + 1] < 0.0:
                    if fy[i][j + 1] * fy[i + 1][j + 1] < 0.0:
                        phi2a = x[i + 1][j]
                        theta2a = y[i + 1][j] + h_theta * fabs(fy[i + 1][j]) / (
                            fabs(fy[i + 1][j]) + fabs(fy[i + 1][j + 1]))
                        phi2b = x[i][j + 1] + h_phi * fabs(fy[i][j + 1]) / (fabs(fy[i][j + 1]) + fabs(fy[i + 1][j + 1]))
                        theta2b = y[i][j + 1]
                        z_y[i][j] = 1
                # --- Unbounded mode: classify every cell crossed by both zero lines.
                if (z_y[i][j] != 0 and z_x[i][j] != 0) and (down_bounds == False and up_bounds == False):
                    flag = 0
                    phi_precision = 0.0
                    theta_precision = 0.0
                    # Intersection of the two interpolated zero lines; the
                    # branches below fold the result back into the cell when
                    # it lands in a mirrored (theta -> -theta) or shifted
                    # (phi +/- pi) copy.
                    phi_a, theta_a = cross(phi1a, theta1a, phi1b, theta1b, phi2a, theta2a, phi2b, theta2b)
                    if (x[i][j] <= phi_a <= x[i + 1][j]) and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a
                        theta_precision = theta_a
                        flag = 1
                    elif (x[i][j] <= phi_a <= x[i + 1][j]) and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a
                        theta_precision = - theta_a
                        flag = 1
                    elif (phi_a < 0) and (x[i][j] <= phi_a + pi <= x[i + 1][j]) \
                            and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a + pi
                        theta_precision = theta_a
                        flag = 1
                    elif (phi_a > 0) and (x[i][j] <= phi_a - pi <= x[i + 1][j]) \
                            and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a - pi
                        theta_precision = theta_a
                        flag = 1
                    elif (phi_a < 0) and (x[i][j] <= phi_a + pi <= x[i + 1][j]) \
                            and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a + pi
                        theta_precision = - theta_a
                        flag = 1
                    elif (phi_a > 0) and (x[i][j] <= phi_a - pi <= x[i + 1][j]) \
                            and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a - pi
                        theta_precision = - theta_a
                        flag = 1
                    if flag == 1:
                        # Interpolate the second derivatives at the
                        # intersection, with distances measured on the
                        # sphere via s2.
                        fxx_precision = fxx[i][j] + (fxx[i + 1][j] - fxx[i][j]) * \
                            s2(phi_precision, x[i][j], theta_precision, theta_precision) / \
                            s2(x[i + 1][j], x[i][j], theta_precision, theta_precision) + \
                            (fxx[i][j + 1] - fxx[i][j]) * \
                            s2(phi_precision, phi_precision, theta_precision, y[i][j]) / \
                            s2(phi_precision, phi_precision, y[i][j], y[i][j + 1])
                        fyy_precision = fyy[i][j] + (fyy[i + 1][j] - fyy[i][j]) * \
                            s2(phi_precision, x[i][j], theta_precision, theta_precision) / \
                            s2(x[i + 1][j], x[i][j], theta_precision, theta_precision) + \
                            (fyy[i][j + 1] - fyy[i][j]) * \
                            s2(phi_precision, phi_precision, theta_precision, y[i][j]) / \
                            s2(phi_precision, phi_precision, y[i][j], y[i][j + 1])
                        fxy_precision = fxy[i][j] + (fxy[i + 1][j] - fxy[i][j]) * \
                            s2(phi_precision, x[i][j], theta_precision, theta_precision) / \
                            s2(x[i + 1][j], x[i][j], theta_precision, theta_precision) + \
                            (fxy[i][j + 1] - fxy[i][j]) * \
                            s2(phi_precision, phi_precision, theta_precision, y[i][j]) / \
                            s2(phi_precision, phi_precision, y[i][j], y[i][j + 1])
                        # 0 -> minimum, 2 -> maximum (each +1 to g);
                        # 1 -> saddle (-1 to g).
                        cond_answ = condition_1(fxx_precision, fyy_precision, fxy_precision)
                        if cond_answ == 0:
                            my_type = 'o'
                            g += 1
                            n_min += 1
                            ms = 15
                        if cond_answ == 2:
                            my_type = '+'
                            g += 1
                            n_max += 1
                            ms = 100
                        if cond_answ == 1:
                            my_type = 'x'
                            g -= 1
                            n_sad -= 1
                            ms = 100
                        if my_cmbmap:
                            from lib.cmbplot import point
                            point(my_cmbmap, phi_precision, theta_precision, ms, my_type)
                        if my_file:
                            my_file.write(repr(f[i][j]) + ' ' + repr(phi_precision) + ' ' +
                                          repr(theta_precision) + ' ' + repr(cond_answ) + ' ' + repr(fxx_precision) + ' ' +
                                          repr(fyy_precision) + ' ' + repr(fxy_precision) + ' ' +
                                          repr(fxx_precision * fyy_precision - fxy_precision * fxy_precision) + '\n')
                            my_file.write(repr(f[i][j]) + ' ' + repr(cond_answ) + ' ' + repr(sigma_0) + ' ' +
                                          repr(sigma_1) + ' ' + repr(sigma_2) + '\n')
                # --- Bounded mode: identical procedure, but only for cells
                # whose f value falls between down_bounds and up_bounds
                # (either ordering of the two bounds is accepted).
                if ((z_y[i][j] != 0 and z_x[i][j] != 0) and (down_bounds != False or up_bounds != False)) and (
                        (down_bounds <= f[i][j] <= up_bounds) or (down_bounds >= f[i][j] >= up_bounds)):
                    flag = 0
                    phi_precision = 0.0
                    theta_precision = 0.0
                    phi_a, theta_a = cross(phi1a, theta1a, phi1b, theta1b, phi2a, theta2a, phi2b, theta2b)
                    if (x[i][j] <= phi_a <= x[i + 1][j]) and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a
                        theta_precision = theta_a
                        flag = 1
                    elif (x[i][j] <= phi_a <= x[i + 1][j]) and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a
                        theta_precision = - theta_a
                        flag = 1
                    elif (phi_a < 0) and (x[i][j] <= phi_a + pi <= x[i + 1][j]) \
                            and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a + pi
                        theta_precision = theta_a
                        flag = 1
                    elif (phi_a > 0) and (x[i][j] <= phi_a - pi <= x[i + 1][j]) \
                            and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a - pi
                        theta_precision = theta_a
                        flag = 1
                    elif (phi_a < 0) and (x[i][j] <= phi_a + pi <= x[i + 1][j]) \
                            and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a + pi
                        theta_precision = - theta_a
                        flag = 1
                    elif (phi_a > 0) and (x[i][j] <= phi_a - pi <= x[i + 1][j]) \
                            and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a - pi
                        theta_precision = - theta_a
                        flag = 1
                    if flag != 0:
                        fxx_precision = fxx[i][j] + (fxx[i + 1][j] - fxx[i][j]) * \
                            s2(phi_precision, x[i][j], theta_precision, theta_precision) / \
                            s2(x[i + 1][j], x[i][j], theta_precision, theta_precision) + \
                            (fxx[i][j + 1] - fxx[i][j]) * \
                            s2(phi_precision, phi_precision, theta_precision, y[i][j]) / \
                            s2(phi_precision, phi_precision, y[i][j], y[i][j + 1])
                        fyy_precision = fyy[i][j] + (fyy[i + 1][j] - fyy[i][j]) * \
                            s2(phi_precision, x[i][j], theta_precision, theta_precision) / \
                            s2(x[i + 1][j], x[i][j], theta_precision, theta_precision) + \
                            (fyy[i][j + 1] - fyy[i][j]) * \
                            s2(phi_precision, phi_precision, theta_precision, y[i][j]) / \
                            s2(phi_precision, phi_precision, y[i][j], y[i][j + 1])
                        fxy_precision = fxy[i][j] + (fxy[i + 1][j] - fxy[i][j]) * \
                            s2(phi_precision, x[i][j], theta_precision, theta_precision) / \
                            s2(x[i + 1][j], x[i][j], theta_precision, theta_precision) + \
                            (fxy[i][j + 1] - fxy[i][j]) * \
                            s2(phi_precision, phi_precision, theta_precision, y[i][j]) / \
                            s2(phi_precision, phi_precision, y[i][j], y[i][j + 1])
                        cond_answ = condition_1(fxx_precision, fyy_precision, fxy_precision)
                        if cond_answ == 0:
                            my_type = 'o'
                            g += 1
                            n_min += 1
                            ms = 15
                        if cond_answ == 2:
                            my_type = '+'
                            g += 1
                            n_max += 1
                            ms = 100
                        if cond_answ == 1:
                            my_type = 'x'
                            g -= 1
                            n_sad -= 1
                            ms = 100
                        if my_cmbmap:
                            from lib.cmbplot import point
                            point(my_cmbmap, phi_precision, theta_precision, ms, my_type)
                        if my_file:
                            my_file.write(repr(f[i][j]) + ' ' + repr(phi_precision) + ' ' +
                                          repr(theta_precision) + ' ' + repr(cond_answ) + ' ' + repr(
                                fxx_precision) + ' ' +
                                          repr(fyy_precision) + ' ' + repr(fxy_precision) + ' ' +
                                          repr(fxx_precision * fyy_precision - fxy_precision * fxy_precision) + '\n')
                            my_file.write(repr(f[i][j]) + ' ' + repr(cond_answ) + ' ' + repr(sigma_0) + ' ' +
                                          repr(sigma_1) + ' ' + repr(sigma_2) + '\n')
    # Counters are only returned in bounded mode; unbounded runs report
    # exclusively through my_file / my_cmbmap.
    if down_bounds != False or up_bounds != False:
        return g, n_max, n_min, n_sad
def null_points(x, y, f, fx, fy, my_file=False, my_cmbmap=False):
    """Locate the null (critical) points of f, i.e. cells where fx = 0 and
    fy = 0 cross, and return a whitelist mask of the containing cells.

    The zero lines of fx and fy are linearly interpolated inside each grid
    cell from sign changes at the corners and intersected with
    lib.distance.cross, exactly as in type_points, but without any
    classification of the point type.

    x, y      -- node coordinates, indexed [i][j]
    f         -- field values (only echoed into my_file)
    fx, fy    -- first derivatives on the nodes
    my_file   -- open file object for '(f, phi, theta)' records, or False
    my_cmbmap -- map object for lib.cmbplot.point, or False

    Returns an (n, n/2) array with 1 in every cell that contains a null
    point (suitable as the `whitelist` argument of type_points).
    """
    from numpy import zeros
    from math import fabs, pi
    from lib.distance import cross
    n = f.shape[0] - 1
    # z_x / z_y flag cells crossed by the fx = 0 / fy = 0 lines.
    z_x = zeros((n, n / 2))
    z_y = zeros((n, n / 2))
    whitelist = zeros((n, n / 2))
    # Endpoints of the interpolated fx = 0 segment (1a-1b) and fy = 0
    # segment (2a-2b) inside the current cell.
    phi1a = 0.0
    phi1b = 0.0
    phi2a = 0.0
    phi2b = 0.0
    theta1a = 0.0
    theta1b = 0.0
    theta2a = 0.0
    theta2b = 0.0
    for i in xrange(0, n):
        for j in xrange(1, n / 2):
            # Grid steps taken from fixed reference nodes of the grid.
            h_theta = fabs(y[n / 2 + 1][n / 4 + 1])
            h_phi = fabs(x[n / 4][j] - x[n / 4 + 1][j])
            # --- fx = 0 crossing: interpolate the segment endpoints.
            if fx[i][j] * fx[i][j + 1] < 0.0:
                if fx[i][j] * fx[i + 1][j] < 0.0:
                    phi1a = x[i][j]
                    theta1a = y[i][j] + h_theta * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i][j + 1]))
                    phi1b = x[i][j] + h_phi * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i + 1][j]))
                    theta1b = y[i][j]
                    z_x[i][j] = 1
                elif fx[i + 1][j] * fx[i + 1][j + 1] < 0.0:
                    phi1a = x[i][j]
                    theta1a = y[i][j] + h_theta * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i][j + 1]))
                    phi1b = x[i + 1][j]
                    theta1b = y[i + 1][j] + h_theta * fabs(fx[i + 1][j]) / (fabs(fx[i + 1][j])
                                                                           + fabs(fx[i + 1][j + 1]))
                    z_x[i][j] = 1
                elif fx[i][j + 1] * fx[i + 1][j + 1] < 0.0:
                    phi1a = x[i][j]
                    theta1a = y[i][j] + h_theta * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i][j + 1]))
                    phi1b = x[i][j + 1] + h_phi * fabs(fx[i][j + 1]) / (fabs(fx[i][j + 1]) + fabs(fx[i + 1][j + 1]))
                    theta1b = y[i][j + 1]
                    z_x[i][j] = 1
            elif fx[i][j] * fx[i + 1][j] < 0.0:
                if fx[i + 1][j] * fx[i + 1][j + 1] < 0.0:
                    phi1a = x[i][j] + h_phi * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i + 1][j]))
                    theta1a = y[i][j]
                    phi1b = x[i + 1][j]
                    theta1b = y[i + 1][j] + h_theta * fabs(fx[i + 1][j]) / (
                        fabs(fx[i + 1][j]) + fabs(fx[i + 1][j + 1]))
                    z_x[i][j] = 1
                elif fx[i][j + 1] * fx[i + 1][j + 1] < 0.0:
                    phi1a = x[i][j] + h_phi * fabs(fx[i][j]) / (fabs(fx[i][j]) + fabs(fx[i + 1][j]))
                    theta1a = y[i][j]
                    phi1b = x[i][j + 1] + h_phi * fabs(fx[i][j + 1]) / (fabs(fx[i][j + 1]) + fabs(fx[i + 1][j + 1]))
                    theta1b = y[i][j + 1]
                    z_x[i][j] = 1
            elif fx[i + 1][j] * fx[i + 1][j + 1] < 0.0:
                if fx[i][j + 1] * fx[i + 1][j + 1] < 0.0:
                    phi1a = x[i + 1][j]
                    theta1a = y[i + 1][j] + h_theta * fabs(fx[i + 1][j]) / (
                        fabs(fx[i + 1][j]) + fabs(fx[i + 1][j + 1]))
                    phi1b = x[i][j + 1] + h_phi * fabs(fx[i][j + 1]) / (fabs(fx[i][j + 1]) + fabs(fx[i + 1][j + 1]))
                    theta1b = y[i][j + 1]
                    z_x[i][j] = 1
            # --- fy = 0 crossing: same edge search for fy.
            if fy[i][j] * fy[i][j + 1] < 0.0:
                if fy[i][j] * fy[i + 1][j] < 0.0:
                    phi2a = x[i][j]
                    theta2a = y[i][j] + h_theta * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i][j + 1]))
                    phi2b = x[i][j] + h_phi * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i + 1][j]))
                    theta2b = y[i][j]
                    z_y[i][j] = 1
                elif fy[i + 1][j] * fy[i + 1][j + 1] < 0.0:
                    phi2a = x[i][j]
                    theta2a = y[i][j] + h_theta * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i][j + 1]))
                    phi2b = x[i + 1][j]
                    theta2b = y[i + 1][j] + h_theta * fabs(fy[i + 1][j]) / (
                        fabs(fy[i + 1][j]) + fabs(fy[i + 1][j + 1]))
                    z_y[i][j] = 1
                elif fy[i][j + 1] * fy[i + 1][j + 1] < 0.0:
                    phi2a = x[i][j]
                    theta2a = y[i][j] + h_theta * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i][j + 1]))
                    phi2b = x[i][j + 1] + h_phi * fabs(fy[i][j + 1]) / (fabs(fy[i][j + 1]) + fabs(fy[i + 1][j + 1]))
                    theta2b = y[i][j + 1]
                    z_y[i][j] = 1
            elif fy[i][j] * fy[i + 1][j] < 0.0:
                if fy[i + 1][j] * fy[i + 1][j + 1] < 0.0:
                    phi2a = x[i][j] + h_phi * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i + 1][j]))
                    theta2a = y[i][j]
                    phi2b = x[i + 1][j]
                    theta2b = y[i + 1][j] + h_theta * fabs(fy[i + 1][j]) / (
                        fabs(fy[i + 1][j]) + fabs(fy[i + 1][j + 1]))
                    z_y[i][j] = 1
                elif fy[i][j + 1] * fy[i + 1][j + 1] < 0.0:
                    phi2a = x[i][j] + h_phi * fabs(fy[i][j]) / (fabs(fy[i][j]) + fabs(fy[i + 1][j]))
                    theta2a = y[i][j]
                    phi2b = x[i][j + 1] + h_phi * fabs(fy[i][j + 1]) / (fabs(fy[i][j + 1]) + fabs(fy[i + 1][j + 1]))
                    theta2b = y[i][j + 1]
                    z_y[i][j] = 1
            elif fy[i + 1][j] * fy[i + 1][j + 1] < 0.0:
                if fy[i][j + 1] * fy[i + 1][j + 1] < 0.0:
                    phi2a = x[i + 1][j]
                    theta2a = y[i + 1][j] + h_theta * fabs(fy[i + 1][j]) / (
                        fabs(fy[i + 1][j]) + fabs(fy[i + 1][j + 1]))
                    phi2b = x[i][j + 1] + h_phi * fabs(fy[i][j + 1]) / (fabs(fy[i][j + 1]) + fabs(fy[i + 1][j + 1]))
                    theta2b = y[i][j + 1]
                    z_y[i][j] = 1
            # Both zero lines cross this cell: intersect them and fold the
            # intersection back into the cell if it landed in a mirrored
            # (theta -> -theta) or shifted (phi +/- pi) copy.
            if z_y[i][j] != 0 and z_x[i][j] != 0:
                flag = 0
                phi_precision = 0.0
                theta_precision = 0.0
                phi_a, theta_a = cross(phi1a, theta1a, phi1b, theta1b, phi2a, theta2a, phi2b, theta2b)
                if (x[i][j] <= phi_a <= x[i + 1][j]) and (y[i][j] <= theta_a <= y[i][j + 1]):
                    phi_precision = phi_a
                    theta_precision = theta_a
                    flag = 1
                elif (x[i][j] <= phi_a <= x[i + 1][j]) and (y[i][j] <= - theta_a <= y[i][j + 1]):
                    phi_precision = phi_a
                    theta_precision = - theta_a
                    flag = 1
                elif (phi_a < 0) and (x[i][j] <= phi_a + pi <= x[i + 1][j]) \
                        and (y[i][j] <= theta_a <= y[i][j + 1]):
                    phi_precision = phi_a + pi
                    theta_precision = theta_a
                    flag = 1
                elif (phi_a > 0) and (x[i][j] <= phi_a - pi <= x[i + 1][j]) \
                        and (y[i][j] <= theta_a <= y[i][j + 1]):
                    phi_precision = phi_a - pi
                    theta_precision = theta_a
                    flag = 1
                elif (phi_a < 0) and (x[i][j] <= phi_a + pi <= x[i + 1][j]) \
                        and (y[i][j] <= - theta_a <= y[i][j + 1]):
                    phi_precision = phi_a + pi
                    theta_precision = - theta_a
                    flag = 1
                elif (phi_a > 0) and (x[i][j] <= phi_a - pi <= x[i + 1][j]) \
                        and (y[i][j] <= - theta_a <= y[i][j + 1]):
                    phi_precision = phi_a - pi
                    theta_precision = - theta_a
                    flag = 1
                if flag == 1:
                    if my_cmbmap:
                        from lib.cmbplot import point
                        ms = 4
                        my_type = '*'
                        point(my_cmbmap, phi_precision, theta_precision, ms, my_type, 'red')
                    if my_file:
                        my_file.write(repr(f[i][j]) + ' ' + repr(phi_precision) + ' ' +
                                      repr(theta_precision) + ' ' + '\n')
                    # Remember the cell for later passes (e.g. type_points).
                    whitelist[i][j] = 1
    return whitelist
def singular_points(x, y, q, u, qx, qy, ux, uy, cos_coef_q, sin_coef_q, cos_coef_u, sin_coef_u, l_max_dir,
                    my_file=False, my_cmbmap=False, print_num=False, number=0):
    """Find and classify polarization singular points, i.e. cells where the
    zero lines of U and Q cross.

    The u = 0 and q = 0 segments are linearly interpolated inside each grid
    cell from corner sign changes and intersected with lib.distance.cross;
    the Q/U first derivatives at the intersection are then restored from the
    harmonic coefficients (restore_value_4) and the point is classified by
    condition_2 (0 -> saddle, 1 -> beak, 2 -> comet).

    x, y       -- node coordinates, indexed [i][j]
    q, u       -- Stokes Q and U maps on the nodes
    qx, qy, ux, uy -- their first derivatives on the nodes (not read here;
                  precision derivatives come from the coefficients instead)
    cos_coef_*, sin_coef_*, l_max_dir -- harmonic expansion inputs for
                  restore_value_4
    my_file    -- open file object for per-point records, or False
    my_cmbmap  -- map object for lib.cmbplot.point, or False
    print_num  -- when true, return the counters at the end of the scan
    number     -- when non-zero, stop after `number` points and return the
                  counters early

    Returns (n_saddle, n_beak, n_comet) when number hits zero or print_num
    is set; otherwise None.

    NOTE(review): the two `number` branches below are intentionally
    near-identical copies; only the early-exit bookkeeping differs.
    """
    from numpy import zeros
    from math import fabs, pi
    from lib.distance import s2, cross, restore_value_4
    n = q.shape[0] - 1
    # g is a genus-like running total; the n_* counters split it by type.
    g = 0.0
    n_saddle = 0.0
    n_beak = 0.0
    n_comet = 0.0
    # z_x / z_y flag cells crossed by the u = 0 / q = 0 lines.
    z_x = zeros((n, n / 2))
    z_y = zeros((n, n / 2))
    # Endpoints of the interpolated u = 0 segment (1a-1b) and q = 0
    # segment (2a-2b) inside the current cell.
    phi1a = 0.0
    phi1b = 0.0
    phi2a = 0.0
    phi2b = 0.0
    theta1a = 0.0
    theta1b = 0.0
    theta2a = 0.0
    theta2b = 0.0
    for i in xrange(0, n):
        for j in xrange(1, n / 2):
            # Grid steps taken from fixed reference nodes of the grid.
            h_theta = fabs(y[n / 2 + 1][n / 4 + 1])
            h_phi = fabs(x[n / 4][j] - x[n / 4 + 1][j])
            # --- Full scan (no early exit requested).
            if number == 0:
                # u = 0 crossing: interpolate the segment endpoints.
                if u[i][j] * u[i][j + 1] < 0.0:
                    if u[i][j] * u[i + 1][j] < 0.0:
                        phi1a = x[i][j]
                        theta1a = y[i][j] + h_theta * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i][j + 1]))
                        phi1b = x[i][j] + h_phi * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i + 1][j]))
                        theta1b = y[i][j]
                        z_x[i][j] = 1
                    elif u[i + 1][j] * u[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j]
                        theta1a = y[i][j] + h_theta * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i][j + 1]))
                        phi1b = x[i + 1][j]
                        theta1b = y[i + 1][j] + h_theta * fabs(u[i + 1][j]) \
                            / (fabs(u[i + 1][j]) + fabs(u[i + 1][j + 1]))
                        z_x[i][j] = 1
                    elif u[i][j + 1] * u[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j]
                        theta1a = y[i][j] + h_theta * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i][j + 1]))
                        phi1b = x[i][j + 1] + h_phi * fabs(u[i][j + 1]) / (fabs(u[i][j + 1]) + fabs(u[i + 1][j + 1]))
                        theta1b = y[i][j + 1]
                        z_x[i][j] = 1
                elif u[i][j] * u[i + 1][j] < 0.0:
                    if u[i + 1][j] * u[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j] + h_phi * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i + 1][j]))
                        theta1a = y[i][j]
                        phi1b = x[i + 1][j]
                        theta1b = y[i + 1][j] + h_theta * fabs(u[i + 1][j]) \
                            / (fabs(u[i + 1][j]) + fabs(u[i + 1][j + 1]))
                        z_x[i][j] = 1
                    elif u[i][j + 1] * u[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j] + h_phi * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i + 1][j]))
                        theta1a = y[i][j]
                        phi1b = x[i][j + 1] + h_phi * fabs(u[i][j + 1]) / (fabs(u[i][j + 1]) + fabs(u[i + 1][j + 1]))
                        theta1b = y[i][j + 1]
                        z_x[i][j] = 1
                elif u[i + 1][j] * u[i + 1][j + 1] < 0.0:
                    if u[i][j + 1] * u[i + 1][j + 1] < 0.0:
                        phi1a = x[i + 1][j]
                        theta1a = y[i + 1][j] + h_theta * fabs(u[i + 1][j]) / (
                            fabs(u[i + 1][j]) + fabs(u[i + 1][j + 1]))
                        phi1b = x[i][j + 1] + h_phi * fabs(u[i][j + 1]) / (fabs(u[i][j + 1]) + fabs(u[i + 1][j + 1]))
                        theta1b = y[i][j + 1]
                        z_x[i][j] = 1
                # q = 0 crossing: same edge search for q.
                if q[i][j] * q[i][j + 1] < 0.0:
                    if q[i][j] * q[i + 1][j] < 0.0:
                        phi2a = x[i][j]
                        theta2a = y[i][j] + h_theta * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i][j + 1]))
                        phi2b = x[i][j] + h_phi * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i + 1][j]))
                        theta2b = y[i][j]
                        z_y[i][j] = 1
                    elif q[i + 1][j] * q[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j]
                        theta2a = y[i][j] + h_theta * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i][j + 1]))
                        phi2b = x[i + 1][j]
                        theta2b = y[i + 1][j] + h_theta * fabs(q[i + 1][j]) \
                            / (fabs(q[i + 1][j]) + fabs(q[i + 1][j + 1]))
                        z_y[i][j] = 1
                    elif q[i][j + 1] * q[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j]
                        theta2a = y[i][j] + h_theta * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i][j + 1]))
                        phi2b = x[i][j + 1] + h_phi * fabs(q[i][j + 1]) / (fabs(q[i][j + 1]) + fabs(q[i + 1][j + 1]))
                        theta2b = y[i][j + 1]
                        z_y[i][j] = 1
                elif q[i][j] * q[i + 1][j] < 0.0:
                    if q[i + 1][j] * q[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j] + h_phi * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i + 1][j]))
                        theta2a = y[i][j]
                        phi2b = x[i + 1][j]
                        theta2b = y[i + 1][j] + h_theta * fabs(q[i + 1][j]) / \
                            (fabs(q[i + 1][j]) + fabs(q[i + 1][j + 1]))
                        z_y[i][j] = 1
                    elif q[i][j + 1] * q[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j] + h_phi * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i + 1][j]))
                        theta2a = y[i][j]
                        phi2b = x[i][j + 1] + h_phi * fabs(q[i][j + 1]) / (fabs(q[i][j + 1]) + fabs(q[i + 1][j + 1]))
                        theta2b = y[i][j + 1]
                        z_y[i][j] = 1
                elif q[i + 1][j] * q[i + 1][j + 1] < 0.0:
                    if q[i][j + 1] * q[i + 1][j + 1] < 0.0:
                        phi2a = x[i + 1][j]
                        theta2a = y[i + 1][j] + h_theta * fabs(q[i + 1][j]) / (
                            fabs(q[i + 1][j]) + fabs(q[i + 1][j + 1]))
                        phi2b = x[i][j + 1] + h_phi * fabs(q[i][j + 1]) / (fabs(q[i][j + 1]) + fabs(q[i + 1][j + 1]))
                        theta2b = y[i][j + 1]
                        z_y[i][j] = 1
                # Both zero lines cross this cell: intersect and fold the
                # result back into the cell (mirror in theta / shift in phi).
                if z_y[i][j] != 0 and z_x[i][j] != 0:
                    flag = 0
                    phi_precision = 0.0
                    theta_precision = 0.0
                    phi_a, theta_a = cross(phi1a, theta1a, phi1b, theta1b, phi2a, theta2a, phi2b, theta2b)
                    if (x[i][j] <= phi_a <= x[i + 1][j]) and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a
                        theta_precision = theta_a
                        flag = 1
                    elif (x[i][j] <= phi_a <= x[i + 1][j]) and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a
                        theta_precision = - theta_a
                        flag = 1
                    elif (phi_a < 0) and (x[i][j] <= phi_a + pi <= x[i + 1][j]) \
                            and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a + pi
                        theta_precision = theta_a
                        flag = 1
                    elif (phi_a > 0) and (x[i][j] <= phi_a - pi <= x[i + 1][j]) \
                            and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a - pi
                        theta_precision = theta_a
                        flag = 1
                    elif (phi_a < 0) and (x[i][j] <= phi_a + pi <= x[i + 1][j]) \
                            and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a + pi
                        theta_precision = - theta_a
                        flag = 1
                    elif (phi_a > 0) and (x[i][j] <= phi_a - pi <= x[i + 1][j]) \
                            and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a - pi
                        theta_precision = - theta_a
                        flag = 1
                    if flag == 1:
                        # Restore exact Q/U derivatives at the intersection
                        # from the harmonic coefficients and classify.
                        qx_precision = restore_value_4(phi_precision, theta_precision,
                                                       cos_coef_q, sin_coef_q, l_max_dir, sign=1, diff=True)
                        qy_precision = restore_value_4(phi_precision, theta_precision,
                                                       cos_coef_q, sin_coef_q, l_max_dir, sign=2)
                        ux_precision = restore_value_4(phi_precision, theta_precision,
                                                       cos_coef_u, sin_coef_u, l_max_dir, sign=1, diff=True)
                        uy_precision = restore_value_4(phi_precision, theta_precision,
                                                       cos_coef_u, sin_coef_u, l_max_dir, sign=2)
                        cond_answ = condition_2(qx_precision, qy_precision, ux_precision, uy_precision)
                        if cond_answ == 0:
                            my_type = 'o'
                            g += 1
                            n_saddle += 1
                            ms = 15
                        if cond_answ == 1:
                            my_type = '+'
                            g += 1
                            n_beak += 1
                            ms = 100
                        if cond_answ == 2:
                            my_type = 'x'
                            g -= 1
                            n_comet += 1
                            ms = 100
                        if my_cmbmap:
                            from lib.cmbplot import point
                            point(my_cmbmap, phi_precision, theta_precision, ms, my_type)
                        if my_file:
                            my_file.write(repr(i) + ' ' + repr(j) + ' ' +
                                          repr(phi_precision) + ' ' + repr(theta_precision) + ' ' +
                                          repr(cond_answ) + ' ' + repr(qx_precision) + ' ' +
                                          repr(qy_precision) + ' ' + repr(ux_precision) + ' ' +
                                          repr(uy_precision) + '\n')
            # --- Limited scan: identical processing, but count down
            # `number` and return as soon as it reaches zero.
            elif number != 0:
                if u[i][j] * u[i][j + 1] < 0.0:
                    if u[i][j] * u[i + 1][j] < 0.0:
                        phi1a = x[i][j]
                        theta1a = y[i][j] + h_theta * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i][j + 1]))
                        phi1b = x[i][j] + h_phi * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i + 1][j]))
                        theta1b = y[i][j]
                        z_x[i][j] = 1
                    elif u[i + 1][j] * u[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j]
                        theta1a = y[i][j] + h_theta * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i][j + 1]))
                        phi1b = x[i + 1][j]
                        theta1b = y[i + 1][j] + h_theta * fabs(u[i + 1][j]) / (
                            fabs(u[i + 1][j]) + fabs(u[i + 1][j + 1]))
                        z_x[i][j] = 1
                    elif u[i][j + 1] * u[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j]
                        theta1a = y[i][j] + h_theta * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i][j + 1]))
                        phi1b = x[i][j + 1] + h_phi * fabs(u[i][j + 1]) / (fabs(u[i][j + 1]) + fabs(u[i + 1][j + 1]))
                        theta1b = y[i][j + 1]
                        z_x[i][j] = 1
                elif u[i][j] * u[i + 1][j] < 0.0:
                    if u[i + 1][j] * u[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j] + h_phi * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i + 1][j]))
                        theta1a = y[i][j]
                        phi1b = x[i + 1][j]
                        theta1b = y[i + 1][j] + h_theta * fabs(u[i + 1][j]) / (
                            fabs(u[i + 1][j]) + fabs(u[i + 1][j + 1]))
                        z_x[i][j] = 1
                    elif u[i][j + 1] * u[i + 1][j + 1] < 0.0:
                        phi1a = x[i][j] + h_phi * fabs(u[i][j]) / (fabs(u[i][j]) + fabs(u[i + 1][j]))
                        theta1a = y[i][j]
                        phi1b = x[i][j + 1] + h_phi * fabs(u[i][j + 1]) / (fabs(u[i][j + 1]) + fabs(u[i + 1][j + 1]))
                        theta1b = y[i][j + 1]
                        z_x[i][j] = 1
                elif u[i + 1][j] * u[i + 1][j + 1] < 0.0:
                    if u[i][j + 1] * u[i + 1][j + 1] < 0.0:
                        phi1a = x[i + 1][j]
                        theta1a = y[i + 1][j] + h_theta * fabs(u[i + 1][j]) / (
                            fabs(u[i + 1][j]) + fabs(u[i + 1][j + 1]))
                        phi1b = x[i][j + 1] + h_phi * fabs(u[i][j + 1]) / (fabs(u[i][j + 1]) + fabs(u[i + 1][j + 1]))
                        theta1b = y[i][j + 1]
                        z_x[i][j] = 1
                if q[i][j] * q[i][j + 1] < 0.0:
                    if q[i][j] * q[i + 1][j] < 0.0:
                        phi2a = x[i][j]
                        theta2a = y[i][j] + h_theta * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i][j + 1]))
                        phi2b = x[i][j] + h_phi * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i + 1][j]))
                        theta2b = y[i][j]
                        z_y[i][j] = 1
                    elif q[i + 1][j] * q[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j]
                        theta2a = y[i][j] + h_theta * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i][j + 1]))
                        phi2b = x[i + 1][j]
                        theta2b = y[i + 1][j] + h_theta * fabs(q[i + 1][j]) / (
                            fabs(q[i + 1][j]) + fabs(q[i + 1][j + 1]))
                        z_y[i][j] = 1
                    elif q[i][j + 1] * q[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j]
                        theta2a = y[i][j] + h_theta * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i][j + 1]))
                        phi2b = x[i][j + 1] + h_phi * fabs(q[i][j + 1]) / (fabs(q[i][j + 1]) + fabs(q[i + 1][j + 1]))
                        theta2b = y[i][j + 1]
                        z_y[i][j] = 1
                elif q[i][j] * q[i + 1][j] < 0.0:
                    if q[i + 1][j] * q[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j] + h_phi * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i + 1][j]))
                        theta2a = y[i][j]
                        phi2b = x[i + 1][j]
                        theta2b = y[i + 1][j] + h_theta * fabs(q[i + 1][j]) / (
                            fabs(q[i + 1][j]) + fabs(q[i + 1][j + 1]))
                        z_y[i][j] = 1
                    elif q[i][j + 1] * q[i + 1][j + 1] < 0.0:
                        phi2a = x[i][j] + h_phi * fabs(q[i][j]) / (fabs(q[i][j]) + fabs(q[i + 1][j]))
                        theta2a = y[i][j]
                        phi2b = x[i][j + 1] + h_phi * fabs(q[i][j + 1]) / (fabs(q[i][j + 1]) + fabs(q[i + 1][j + 1]))
                        theta2b = y[i][j + 1]
                        z_y[i][j] = 1
                elif q[i + 1][j] * q[i + 1][j + 1] < 0.0:
                    if q[i][j + 1] * q[i + 1][j + 1] < 0.0:
                        phi2a = x[i + 1][j]
                        theta2a = y[i + 1][j] + h_theta * fabs(q[i + 1][j]) / (
                            fabs(q[i + 1][j]) + fabs(q[i + 1][j + 1]))
                        phi2b = x[i][j + 1] + h_phi * fabs(q[i][j + 1]) / (fabs(q[i][j + 1]) + fabs(q[i + 1][j + 1]))
                        theta2b = y[i][j + 1]
                        z_y[i][j] = 1
                if z_y[i][j] != 0 and z_x[i][j] != 0:
                    flag = 0
                    phi_precision = 0.0
                    theta_precision = 0.0
                    phi_a, theta_a = cross(phi1a, theta1a, phi1b, theta1b, phi2a, theta2a, phi2b, theta2b)
                    if (x[i][j] <= phi_a <= x[i + 1][j]) and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a
                        theta_precision = theta_a
                        flag = 1
                    elif (x[i][j] <= phi_a <= x[i + 1][j]) and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a
                        theta_precision = - theta_a
                        flag = 1
                    elif (phi_a < 0) and (x[i][j] <= phi_a + pi <= x[i + 1][j]) \
                            and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a + pi
                        theta_precision = theta_a
                        flag = 1
                    elif (phi_a > 0) and (x[i][j] <= phi_a - pi <= x[i + 1][j]) \
                            and (y[i][j] <= theta_a <= y[i][j + 1]):
                        phi_precision = phi_a - pi
                        theta_precision = theta_a
                        flag = 1
                    elif (phi_a < 0) and (x[i][j] <= phi_a + pi <= x[i + 1][j]) \
                            and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a + pi
                        theta_precision = - theta_a
                        flag = 1
                    elif (phi_a > 0) and (x[i][j] <= phi_a - pi <= x[i + 1][j]) \
                            and (y[i][j] <= - theta_a <= y[i][j + 1]):
                        phi_precision = phi_a - pi
                        theta_precision = - theta_a
                        flag = 1
                    if flag == 1:
                        qx_precision = restore_value_4(phi_precision, theta_precision,
                                                       cos_coef_q, sin_coef_q, l_max_dir, sign=1, diff=True)
                        qy_precision = restore_value_4(phi_precision, theta_precision,
                                                       cos_coef_q, sin_coef_q, l_max_dir, sign=2)
                        ux_precision = restore_value_4(phi_precision, theta_precision,
                                                       cos_coef_u, sin_coef_u, l_max_dir, sign=1, diff=True)
                        uy_precision = restore_value_4(phi_precision, theta_precision,
                                                       cos_coef_u, sin_coef_u, l_max_dir, sign=2)
                        cond_answ = condition_2(qx_precision, qy_precision, ux_precision, uy_precision)
                        if cond_answ == 0:
                            my_type = 'o'
                            g += 1
                            n_saddle += 1
                            ms = 15
                        if cond_answ == 1:
                            my_type = '+'
                            g += 1
                            n_beak += 1
                            ms = 100
                        if cond_answ == 2:
                            my_type = 'x'
                            g -= 1
                            n_comet += 1
                            ms = 100
                        if my_cmbmap:
                            from lib.cmbplot import point
                            point(my_cmbmap, phi_precision, theta_precision, ms, my_type)
                        if my_file:
                            my_file.write(repr(i) + ' ' + repr(j) + ' ' +
                                          repr(phi_precision) + ' ' + repr(theta_precision) + ' ' +
                                          repr(cond_answ) + ' ' + repr(qx_precision) + ' ' +
                                          repr(qy_precision) + ' ' + repr(ux_precision) + ' ' +
                                          repr(uy_precision) + '\n')
                        # Early exit once the requested number of points
                        # has been processed.
                        number -= 1
                        if number == 0:
                            return n_saddle, n_beak, n_comet
    if print_num:
        return n_saddle, n_beak, n_comet
def points_comparison_single(file1, n, file_out=False, my_cmbmap=False, number_plot=0, pix=False):
    """Plot and/or write the singular points of a single catalogue.

    file1       -- catalogue array; each of its 9-column rows holds
                   (i, j, phi, theta, type, ...) as written by singular_points
    n           -- grid resolution the catalogue was computed on
    file_out    -- open file object for '(i, j, type)' records, or False
    my_cmbmap   -- map object for lib.cmbplot.point, or False
    number_plot -- when non-zero, plot at most this many points (with the
                   pixel marker if `pix`), then stop
    pix         -- also mark the grid node of each plotted point

    Colour encodes the point type: 0 green, 1 blue, 2 red.
    Returns None; all output goes to my_cmbmap / file_out.
    """
    from numpy import zeros, size
    from math import pi
    # Occupancy map of catalogue pixels.
    z = zeros((n, n / 2))
    # Each catalogue row has 9 columns.
    n_points = int(size(file1) / 9.0)
    # Node coordinates of the n x n/2 grid.
    x = zeros((n + 1, n / 2 + 1))
    y = zeros((n + 1, n / 2 + 1))
    for i in xrange(0, n + 1):
        for j in xrange(0, n / 2 + 1):
            x[i][j] = (2.0 * i - n) / n * pi
            y[i][j] = 2.0 * j / n * pi - pi / 2.0
    for i in xrange(0, n_points):
        z[int(file1[i][0]), int(file1[i][1])] = 1
    for i in xrange(0, n_points):
        if z[int(file1[i][0]), int(file1[i][1])] == 1:
            # Marker colour encodes the point type.
            # NOTE(review): my_type/my_color/ms stay unbound for types
            # outside {0, 1, 2} -- confirm catalogues never contain others.
            if file1[i][4] == 0:
                my_type = 'o'
                my_color = 'green'
                ms = 20
            elif file1[i][4] == 1:
                my_type = 'o'
                my_color = 'blue'
                ms = 20
            elif file1[i][4] == 2:
                my_type = 'o'
                my_color = 'red'
                ms = 20
            if my_cmbmap:
                from lib.cmbplot import point
                if number_plot == 0:
                    point(my_cmbmap, file1[i][2], file1[i][3], ms, my_type, my_color)
                elif number_plot != 0:
                    point(my_cmbmap, file1[i][2], file1[i][3], ms, my_type, my_color)
                    if pix:
                        point(my_cmbmap, x[int(file1[i][0])][int(file1[i][1])], y[int(file1[i][0])][int(file1[i][1])],
                              10, '+')
                    number_plot -= 1
                    if number_plot == 0:
                        break
            if file_out:
                file_out.write(repr(file1[i][0]) + ' ' + repr(file1[i][1]) + ' ' +
                               repr(file1[i][4]) + ' ' + '\n')
def points_comparison(file1, file2, n, file_out=False, my_cmbmap=False, number_plot=0, type_compare=False, pix=False):
    """Plot/write the points of file1 that also appear in file2 (same grid).

    file1, file2 -- catalogue arrays of (i, j, phi, theta, type, ...) rows
                    computed on the same n x n/2 grid
    n            -- grid resolution of both catalogues
    file_out     -- open file object for '(i, j, type)' records, or False
    my_cmbmap    -- map object for lib.cmbplot.point, or False
    number_plot  -- when non-zero, plot at most this many matches, then stop
    type_compare -- when true, require both catalogues to mark the pixel AND
                    both entries to be of type 1; otherwise pixel overlap
                    alone is enough
    pix          -- also mark the grid node of each plotted match

    Colour encodes the file1 point type: 0 green, 1 blue, 2 red.
    Returns None; all output goes to my_cmbmap / file_out.
    """
    # gap = 0
    # number of points for each type
    # type_compare
    from numpy import zeros, size
    from math import pi
    # Occupancy maps of each catalogue's pixels; their product marks overlap.
    z_1 = zeros((n, n / 2))
    z_2 = zeros((n, n / 2))
    # Each catalogue row has 9 columns.
    n_points_1 = int(size(file1) / 9.0)
    n_points_2 = int(size(file2) / 9.0)
    x = zeros((n + 1, n / 2 + 1))
    y = zeros((n + 1, n / 2 + 1))
    for i in xrange(0, n + 1):
        for j in xrange(0, n / 2 + 1):
            x[i][j] = (2.0 * i - n) / n * pi
            y[i][j] = 2.0 * j / n * pi - pi / 2.0
    for i in xrange(0, n_points_1):
        z_1[int(file1[i][0]), int(file1[i][1])] = 1
    for i in xrange(0, n_points_2):
        z_2[int(file2[i][0]), int(file2[i][1])] = 1
    z = z_1 * z_2
    for i in xrange(0, n_points_1):
        flag = 0
        if type_compare:
            # NOTE(review): comparing file1[i] with file2[i] assumes the two
            # catalogues are row-aligned -- confirm against the writers.
            if file1[i][4] == file2[i][4] == 1 and z[int(file1[i][0]), int(file1[i][1])]:
                flag = 1
        else:
            if z[int(file1[i][0]), int(file1[i][1])]:
                flag = 1
        if flag:
            # Marker colour encodes the point type.
            if file1[i][4] == 0:
                my_type = 'o'
                my_color = 'green'
                ms = 5
            elif file1[i][4] == 1:
                my_type = 'o'
                my_color = 'blue'
                ms = 5
            elif file1[i][4] == 2:
                my_type = 'o'
                my_color = 'red'
                ms = 5
            if my_cmbmap:
                from lib.cmbplot import point
                if number_plot == 0:
                    point(my_cmbmap, file1[i][2], file1[i][3], ms, my_type, my_color)
                elif number_plot != 0:
                    point(my_cmbmap, file1[i][2], file1[i][3], ms, my_type, my_color)
                    if pix:
                        point(my_cmbmap, x[int(file1[i][0])][int(file1[i][1])], y[int(file1[i][0])][int(file1[i][1])],
                              10, '+')
                    number_plot -= 1
                    if number_plot == 0:
                        break
            if file_out:
                file_out.write(repr(file1[i][0]) + ' ' + repr(file1[i][1]) + ' ' +
                               repr(file1[i][4]) + ' ' + '\n')
def points_comparison_pix(file1, file2, n1, n2, file_out=False, my_cmbmap=False, number_plot=0,
type_compare=False, pix=False):
# n2 >= n1
from numpy import zeros, size
from math import pi
z_1 = zeros((n2, n2 / 2))
z_2 = zeros((n2, n2 / 2))
n_points_1 = int(size(file1) / 9.0)
n_points_2 = int(size(file2) / 9.0)
x_1 = zeros((n1 + 1, n1 / 2 + 1))
y_1 = zeros((n1 + 1, n1 / 2 + 1))
x_2 = zeros((n2 + 1, n2 / 2 + 1))
y_2 = zeros((n2 + 1, n2 / 2 + 1))
#?
print n_points_1
print n_points_2
print n1
print n2
for i in xrange(0, n1 + 1):
for j in xrange(0, n1 / 2 + 1):
x_1[i][j] = (2.0 * i - n1) / n1 * pi
y_1[i][j] = 2.0 * j / n1 * pi - pi / 2.0
for i in xrange(0, n2 + 1):
for j in xrange(0, n2 / 2 + 1):
x_2[i][j] = (2.0 * i - n2) / n2 * pi
y_2[i][j] = 2.0 * j / n2 * pi - pi / 2.0
for i in xrange(0, n_points_1):
z_1[int((n2 / n1) * file1[i][0]), int((n2 / n1) * file1[i][1])] = file1[i][4]
z_1[int((n2 / n1) * file1[i][0]) + 1, int((n2 / n1) * file1[i][1])] = file1[i][4]
z_1[int((n2 / n1) * file1[i][0]), int((n2 / n1) * file1[i][1]) + 1] = file1[i][4]
z_1[int((n2 / n1) * file1[i][0]) + 1, int((n2 / n1) * file1[i][1]) + 1] = file1[i][4]
for i in xrange(0, n_points_2):
z_2[int(file2[i][0]), int(file2[i][1])] = file2[i][4]
z = z_1 * z_2
#n_points_2 ?
for i in xrange(0, n_points_2 + 1):
flag = 0
if type_compare:
if z_1[int(file2[i][0]), int(file2[i][1])] == z_2[int(file2[i][0]), int(file2[i][1])] and z[int(file2[i][0]), int(file2[i][1])] != 0:
flag = 1
else:
if z[int(file1[i][0]), int(file1[i][1])] != 0:
flag = 1
if flag:
if file2[i][4] == 0:
my_type = 'o'
my_color = 'green'
ms = 5
elif file2[i][4] == 1:
my_type = 'o'
my_color = 'blue'
ms = 5
elif file2[i][4] == 2:
my_type = 'o'
my_color = 'red'
ms = 5
if my_cmbmap:
from lib.cmbplot import point
if number_plot == 0:
point(my_cmbmap, file2[i][2], file2[i][3], ms, my_type, my_color)
elif number_plot != 0:
point(my_cmbmap, file2[i][2], file2[i][3], ms, my_type, my_color)
if pix:
point(my_cmbmap, x_1[(n2 / n1) * int(file1[i][0]), (n2 / n1) * int(file1[i][1])],
y_1[(n2 / n1) * int(file1[i][0]), (n2 / n1) * int(file1[i][1])], 10, '+')
point(my_cmbmap, x_1[(n2 / n1) * int(file1[i][0]) + 1, (n2 / n1) * int(file1[i][1])],
y_1[(n2 / n1) * int(file1[i][0]) + 1, (n2 / n1) * int(file1[i][1])], 10, '+')
point(my_cmbmap, x_1[(n2 / n1) * int(file1[i][0]), (n2 / n1) * int(file1[i][1]) + 1],
y_1[(n2 / n1) * int(file1[i][0]), (n2 / n1) * int(file1[i][1]) + 1], 10, '+')
point(my_cmbmap, x_1[(n2 / n1) * int(file1[i][0]) + 1, (n2 / n1) * int(file1[i][1]) + 1],
y_1[(n2 / n1) * int(file1[i][0]) + 1, (n2 / n1) * int(file1[i][1])] + 1, 10, '+')
number_plot -= 1
if number_plot == 0:
break
if file_out:
file_out.write(repr(file1[i][0]) + ' ' + repr(file1[i][1]) + ' ' +
repr(file1[i][4]) + ' ' + '\n')
| 38.04443
| 145
| 0.339963
| 8,166
| 57,371
| 2.278717
| 0.018246
| 0.06868
| 0.053687
| 0.023431
| 0.941907
| 0.935834
| 0.923259
| 0.905686
| 0.895475
| 0.892519
| 0
| 0.069816
| 0.485698
| 57,371
| 1,507
| 146
| 38.069675
| 0.560835
| 0.001987
| 0
| 0.87251
| 0
| 0
| 0.004943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.026892
| null | null | 0.005976
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
518166881338f6f7ad4f9b1ab1f51e2c06382a97
| 9,247
|
py
|
Python
|
src/cogs/image.py
|
joshuapatel/PewDiePie
|
64505c9b16229207a91b5fff808487d220241c32
|
[
"MIT"
] | 9
|
2019-02-04T14:44:07.000Z
|
2019-06-11T05:05:37.000Z
|
src/cogs/image.py
|
joshuapatel/PewDiePie
|
64505c9b16229207a91b5fff808487d220241c32
|
[
"MIT"
] | 4
|
2019-03-23T01:50:46.000Z
|
2019-05-15T22:17:42.000Z
|
src/cogs/image.py
|
joshuapatel/PewDiePie
|
64505c9b16229207a91b5fff808487d220241c32
|
[
"MIT"
] | 10
|
2019-03-19T08:09:36.000Z
|
2020-12-22T19:59:36.000Z
|
# -> Discord
import discord
from discord.ext import commands
import aiohttp
import struct
import io
import contextlib
from io import BytesIO
class Image(commands.Cog):
    """Image-generation commands (donator-only).

    Every command first verifies the invoker is a donator, then calls an
    external image API (nekobot.xyz for text-based generators,
    dankmemer.services for avatar-based ones) and posts the result in the
    invoking channel.
    """

    def __init__(self, bot):
        self.bot = bot

    async def imggenembed(self, ctx, title: str, imgurl: str):
        """Send *imgurl* wrapped in a red embed titled *title*."""
        em = discord.Embed(title=title, color=discord.Color.red())
        em.set_image(url=imgurl)
        em.set_footer(text="NOTE: These commands are still in beta.")
        await ctx.send(embed=em)

    async def _check_donator(self, ctx) -> bool:
        """Return True if the invoker is a donator; otherwise notify them and return False."""
        check = await self.bot.pool.fetchval(
            "SELECT level FROM donator WHERE userid = $1", ctx.author.id)
        if check:
            return True
        em = discord.Embed(color=discord.Color.dark_teal())
        em.add_field(
            name="Donator Command",
            value="This is a patreon only command. To become a supporter, go [here](https://patreon.com/pdpbot).")
        await ctx.send(embed=em)
        return False

    async def _nekobot_imggen(self, ctx, gen_type: str, title: str, text: str):
        """Fetch a nekobot.xyz image-gen result for *text* and embed it as *title*."""
        await ctx.channel.trigger_typing()
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json"
        }
        # Spaces are URL-encoded by hand, matching the original behaviour.
        url = f'https://nekobot.xyz/api/imagegen?type={gen_type}&text={text}'.replace(" ", "%20")
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=headers) as r:
                raw = await r.json()
        await self.imggenembed(ctx, title, raw['message'])

    async def _dankmemer_imggen(self, ctx, endpoint: str, user, filename: str):
        """Run *user*'s avatar through dankmemer.services/*endpoint* and upload the result as *filename*."""
        if user is None:
            user = ctx.author
        dmapikey = await self.bot.pool.fetchval(
            "SELECT key FROM apikeys WHERE name = $1", "dankmemer")
        if dmapikey is None:
            await ctx.send("The Dank Memer API key has not been set.")
            return
        await ctx.channel.trigger_typing()
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "Authorization": dmapikey
        }
        async with aiohttp.ClientSession() as session:
            async with session.get(
                    f'https://dankmemer.services/api/{endpoint}?avatar1={user.avatar_url}',
                    headers=headers) as r:
                resp = await r.content.read()
        await ctx.send(file=discord.File(io.BytesIO(resp), filename=filename))

    @commands.command()
    async def changemymind(self, ctx, *, text: str):
        """Render *text* on the "change my mind" sign."""
        if await self._check_donator(ctx):
            await self._nekobot_imggen(ctx, "changemymind", "Change My Mind", text)

    @commands.command()
    async def trumptweet(self, ctx, *, text: str):
        """Render *text* as a Trump tweet."""
        if await self._check_donator(ctx):
            await self._nekobot_imggen(ctx, "trumptweet", "Trump Tweet", text)

    @commands.command()
    async def triggered(self, ctx, user: discord.Member = None):
        """Post a "triggered" GIF of *user*'s avatar (defaults to the invoker)."""
        if await self._check_donator(ctx):
            await self._dankmemer_imggen(ctx, "trigger", user, "triggered.gif")

    @commands.command(aliases=["merica"])
    async def america(self, ctx, user: discord.Member = None):
        """Post an America-overlay GIF of *user*'s avatar (defaults to the invoker)."""
        if await self._check_donator(ctx):
            await self._dankmemer_imggen(ctx, "america", user, "america.gif")

    @commands.command()
    async def salty(self, ctx, user: discord.Member = None):
        """Post a "salty" GIF of *user*'s avatar (defaults to the invoker)."""
        if await self._check_donator(ctx):
            await self._dankmemer_imggen(ctx, "salty", user, "salty.gif")

    @commands.command()
    async def wanted(self, ctx, user: discord.Member = None):
        """Post a "wanted" poster of *user*'s avatar (defaults to the invoker)."""
        if await self._check_donator(ctx):
            await self._dankmemer_imggen(ctx, "wanted", user, "wanted.png")

    @commands.command()
    async def gay(self, ctx, user: discord.Member = None):
        """Post a rainbow-overlay image of *user*'s avatar (defaults to the invoker)."""
        if await self._check_donator(ctx):
            await self._dankmemer_imggen(ctx, "gay", user, "gay.png")
def setup(bot):
    # discord.py extension entry point: called on load to register the Image cog.
    bot.add_cog(Image(bot))
| 42.810185
| 152
| 0.58884
| 1,139
| 9,247
| 4.751536
| 0.119403
| 0.036955
| 0.039911
| 0.035477
| 0.900407
| 0.887288
| 0.883038
| 0.883038
| 0.876571
| 0.86918
| 0
| 0.003202
| 0.290689
| 9,247
| 216
| 153
| 42.810185
| 0.821924
| 0.001081
| 0
| 0.754098
| 0
| 0.038251
| 0.269489
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010929
| false
| 0
| 0.038251
| 0
| 0.120219
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51a5b6d5a5020edb5d2fe6ad46ee84ed0feb2464
| 4,224
|
py
|
Python
|
tests/nla_metric_test.py
|
pdasigi/nla-semparse
|
5a733d1ca00abc86d21c88f7fa182887766f889f
|
[
"MIT"
] | null | null | null |
tests/nla_metric_test.py
|
pdasigi/nla-semparse
|
5a733d1ca00abc86d21c88f7fa182887766f889f
|
[
"MIT"
] | null | null | null |
tests/nla_metric_test.py
|
pdasigi/nla-semparse
|
5a733d1ca00abc86d21c88f7fa182887766f889f
|
[
"MIT"
] | null | null | null |
from nla_semparse.nla_metric import NlaMetric
def test_metric_basic():
    """A single-token prediction identical to the target scores 1.0 on all three metrics."""
    metric = NlaMetric()
    metric([['2']], [['2']])
    perfect = {"well_formedness": 1.0,
               "denotation_accuracy": 1.0,
               "sequence_accuracy": 1.0}
    assert metric.get_metric() == perfect
    metric.reset()
def test_metric_one_operation():
    """One-operation arithmetic: sequence accuracy needs an exact token match,
    denotation accuracy only an equal value, well-formedness only valid syntax."""
    prediction = ['(', '+', '2', '3', ')']
    cases = [
        (prediction, 1.0, 1.0),                  # exact match
        (['5'], 1.0, 0.0),                       # same value, different tokens
        (['(', '+', '1', '4', ')'], 1.0, 0.0),   # same value, different operands
        (['(', '-', '1', '4', ')'], 0.0, 0.0),   # different value
    ]
    metric = NlaMetric()
    for target, denotation, sequence in cases:
        metric([prediction], [target])
        assert metric.get_metric() == {"well_formedness": 1.0,
                                       "denotation_accuracy": denotation,
                                       "sequence_accuracy": sequence}
        metric.reset()
def test_metric_ill_formed_sequences():
    """Syntactically invalid predictions score 0.0 across the board."""
    target = ['(', '+', '2', '3', ')']
    all_zero = {"well_formedness": 0.0,
                "denotation_accuracy": 0.0,
                "sequence_accuracy": 0.0}
    metric = NlaMetric()
    for prediction in (['(', '+', '2', ')'],
                       ['(', '+', ')', ')'],
                       ['(', ')']):
        metric([prediction], [target])
        assert metric.get_metric() == all_zero
        metric.reset()
def test_metric_real_cases():
    """Longer sequences resembling real model output, including mixed batches."""
    ill_formed = ['(', '-', '(', '*', '(', '(', '(', '(', '(',
                  '(', '(', '(', '(', '(', ')', ')', ')', ')', ')', ')']
    predictions1 = [ill_formed,
                    ['(', '-', '(', '+', '(', '(', '*', '(', '(',
                     '(', '(', ')', ')', ')', ')', ')', ')', ')', ')', ')']]
    predictions2 = [['132'], ['9']]
    predictions3 = [ill_formed, ['9']]
    targets = [['(', '+', '(', '+', '(', '*', '5', '2', ')', '(',
                '/', '2', '7', ')', ')', '(', '+', '(', '+', '7',
                '7', ')', '(', '*', '3', '(', '*', '6', '6', ')', ')', ')', ')'],
               ['(', '-', '(', '+', '8', '7', ')', '(', '-', '(',
                '+', '(', '+', '6', '(', '/', '7', '7', ')', ')', '7',
                ')', '(', '*', '(', '/', '5', '4', ')', '8', ')', ')', ')']]
    metric = NlaMetric()
    for predictions, well_formed, denotation, sequence in (
            (predictions1, 0.0, 0.0, 0.0),   # both ill-formed
            (predictions2, 1.0, 1.0, 0.0),   # well-formed, correct values
            (predictions3, 0.5, 0.5, 0.0),   # half ill-formed, half correct
            (targets, 1.0, 1.0, 1.0)):       # exact match
        metric(predictions, targets)
        assert metric.get_metric() == {"well_formedness": well_formed,
                                       "denotation_accuracy": denotation,
                                       "sequence_accuracy": sequence}
        metric.reset()
| 45.913043
| 81
| 0.360322
| 320
| 4,224
| 4.5625
| 0.121875
| 0.024658
| 0.09589
| 0.172603
| 0.814384
| 0.783562
| 0.769178
| 0.745205
| 0.709589
| 0.709589
| 0
| 0.046887
| 0.368845
| 4,224
| 91
| 82
| 46.417582
| 0.50075
| 0
| 0
| 0.626506
| 0
| 0
| 0.190814
| 0
| 0
| 0
| 0
| 0
| 0.144578
| 1
| 0.048193
| false
| 0
| 0.012048
| 0
| 0.060241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51a7747c9987c2f9739d1f471ee14e4af7a5a01d
| 32,235
|
py
|
Python
|
tests/unit/query_builder_filters_test.py
|
eht16/lstail
|
8fb61e9d07b05b27e3d45e988afe0c198010248d
|
[
"MIT"
] | 5
|
2021-03-10T18:34:16.000Z
|
2021-09-23T15:57:32.000Z
|
tests/unit/query_builder_filters_test.py
|
eht16/lstail
|
8fb61e9d07b05b27e3d45e988afe0c198010248d
|
[
"MIT"
] | null | null | null |
tests/unit/query_builder_filters_test.py
|
eht16/lstail
|
8fb61e9d07b05b27e3d45e988afe0c198010248d
|
[
"MIT"
] | 2
|
2021-05-04T11:21:58.000Z
|
2021-11-03T13:44:54.000Z
|
# -*- coding: utf-8 -*-
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
from lstail.constants import FILTER_GROUP_MUST, FILTER_GROUP_MUST_NOT
from lstail.query.elasticsearch_2 import ElasticSearch2QueryBuilder
from lstail.query.elasticsearch_6 import ElasticSearch6QueryBuilder
from lstail.query.elasticsearch_7 import ElasticSearch7QueryBuilder
from tests.base import BaseTestCase, mock
# pylint: disable=protected-access,too-many-public-methods
class QueryBuilderCustomSearchTest(BaseTestCase):
# ----------------------------------------------------------------------
def test_filter_v6_phrase(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': 'filter_is', 'disabled': False, 'index': 'foo', 'key': 'action',
'negate': False, 'params': {'query': 'test', 'type': 'phrase'},
'type': 'phrase', 'value': 'test'},
'query': {'match': {'action': {'query': 'test', 'type': 'phrase'}}}}
expected_result = [{'match_phrase': {'action': {'query': 'test'}}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def _factor_query_builder(self, builder_class):
mocked_handler = mock.MagicMock()
mocked_logger = mock.MagicMock()
query_builder = builder_class(
config_index_name='foo',
kibana_index_name='bar',
saved_search_title='foo',
custom_search='bar',
http_handler=mocked_handler,
logger=mocked_logger)
return query_builder
# ----------------------------------------------------------------------
def test_filter_v6_empty(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_phrase = {}
expected_result = []
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_phrase_not(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': 'isnot', 'disabled': False, 'index': 'foo', 'key': 'dns.type',
'negate': True, 'params': {'query': 'B', 'type': 'phrase'},
'type': 'phrase', 'value': 'bar'},
'query': {'match': {'dns.type': {'query': 'B', 'type': 'phrase'}}}}
expected_result = [{'match_phrase': {'dns.type': {'query': 'bar'}}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_phrase_disabled(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': 'isnot', 'disabled': True, 'index': 'foo', 'key': 'dns.type',
'negate': True, 'params': {'query': 'B', 'type': 'phrase'},
'type': 'phrase', 'value': 'bar'},
'query': {'match': {'dns.type': {'query': 'B', 'type': 'phrase'}}}}
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_phrases(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': None, 'disabled': False, 'index': 'foo', 'key': 'dns.type',
'negate': False, 'params': ['A', 'PTR', 'AAAA'], 'type': 'phrases',
'value': 'A, PTR, AAAA'}, 'query': {'bool': {}}}
expected_result = [{'bool': {'minimum_should_match': 1, 'should':
[{'match_phrase': {'dns.type': 'A'}}, {'match_phrase': {'dns.type': 'PTR'}},
{'match_phrase': {'dns.type': 'AAAA'}}]}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_phrases_not(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': 'isnotoneof', 'disabled': False, 'index': 'foo', 'key': 'dns.type',
'negate': True, 'params': ['MX', 'NS'], 'type': 'phrases', 'value': 'MX, NS'},
'query': {'bool': {}}}
expected_result = [{'bool': {'minimum_should_match': 1, 'should':
[{'match_phrase': {'dns.type': 'MX'}}, {'match_phrase': {'dns.type': 'NS'}}]}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_phrases_disabled(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': 'isnotoneof', 'disabled': True, 'index': 'foo', 'key': 'dns.type',
'negate': True, 'params': ['MX', 'NS'], 'type': 'phrases', 'value': 'MX, NS'},
'query': {'bool': {}}}
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_exists(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_exists = {'$state': {'store': 'appState'},
'exists': {'field': 'dns.query'}, 'meta': {'alias': None, 'disabled': False,
'index': 'foo', 'key': 'dns.query', 'negate': False,
'type': 'exists', 'value': 'exists'}}
expected_result = [{'exists': {'field': 'dns.query'}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_exists):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_exists_not(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_exists = {'$state': {'store': 'appState'},
'exists': {'field': 'dns.query'}, 'meta': {'alias': None, 'disabled': False,
'index': 'foo', 'key': 'dns.query', 'negate': True,
'type': 'exists', 'value': 'exists'}}
expected_result = [{'exists': {'field': 'dns.query'}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_exists):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_exists_disabled(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_exists = {'$state': {'store': 'appState'},
'exists': {'field': 'dns.query'}, 'meta': {'alias': None, 'disabled': True,
'index': 'foo', 'key': 'dns.query', 'negate': True,
'type': 'exists', 'value': 'exists'}}
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_exists):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_range(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_range = {'$state': {'store': 'appState'}, 'meta': {
'alias': 'isbetween', 'disabled': False, 'index': 'foo', 'key': 'dns.client_ip',
'negate': False, 'params': {'gte': '0.0.0.0', 'lt': '255.255.255.255'},
'type': 'range', 'value': '0.0.0.0 to 255.255.255.255'}, 'range': {
'dns.client_ip': {'gte': '0.0.0.0', 'lt': '255.255.255.255'}}}
expected_result = [{'range': {'dns.client_ip':
{'gte': '0.0.0.0', 'lt': '255.255.255.255'}}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_range):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_range_not(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_range = {'$state': {'store': 'appState'}, 'meta': {
'alias': 'isbetween', 'disabled': False, 'index': 'foo', 'key': 'dns.client_ip',
'negate': True, 'params': {'gte': '0.0.0.0', 'lt': '255.255.255.255'},
'type': 'range', 'value': '0.0.0.0 to 255.255.255.255'}, 'range': {
'dns.client_ip': {'gte': '0.0.0.0', 'lt': '255.255.255.255'}}}
expected_result = [{'range': {'dns.client_ip':
{'gte': '0.0.0.0', 'lt': '255.255.255.255'}}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_range):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v6_range_disabled(self):
query_builder = self._factor_query_builder(ElasticSearch6QueryBuilder)
kibana_saved_search_filter_range = {'$state': {'store': 'appState'}, 'meta': {
'alias': 'isbetween', 'disabled': True, 'index': 'foo', 'key': 'dns.client_ip',
'negate': False, 'params': {'gte': '0.0.0.0', 'lt': '255.255.255.255'},
'type': 'range', 'value': '0.0.0.0 to 255.255.255.255'}, 'range': {
'dns.client_ip': {'gte': '0.0.0.0', 'lt': '255.255.255.255'}}}
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_range):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v7_phrase(self):
query_builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': 'filter_is', 'disabled': False, 'index': 'foo', 'key': 'action',
'negate': False, 'params': {'query': 'test', 'type': 'phrase'},
'type': 'phrase', 'value': 'test'},
'query': {'match': {'action': {'query': 'test', 'type': 'phrase'}}}}
expected_result = [{'match_phrase': {'action': {'query': 'test'}}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v7_phrase_without_value(self):
query_builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': None, 'disabled': False, 'indexRefName': 'foo', 'key': 'action',
'negate': False, 'params': {'query': 'test'},
'type': 'phrase'},
'query': {'match': {'action': {'query': 'test', 'type': 'phrase'}}}}
expected_result = [{'match_phrase': {'action': {'query': 'test'}}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v7_empty(self):
query_builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
kibana_saved_search_filter_phrase = {}
expected_result = []
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v7_phrase_not(self):
query_builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': 'isnot', 'disabled': False, 'index': 'foo', 'key': 'dns.type',
'negate': True, 'params': {'query': 'B', 'type': 'phrase'},
'type': 'phrase', 'value': 'bar'},
'query': {'match': {'dns.type': {'query': 'B', 'type': 'phrase'}}}}
expected_result = [{'match_phrase': {'dns.type': {'query': 'bar'}}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v7_phrase_disabled(self):
query_builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': 'isnot', 'disabled': True, 'index': 'foo', 'key': 'dns.type',
'negate': True, 'params': {'query': 'B', 'type': 'phrase'},
'type': 'phrase', 'value': 'bar'},
'query': {'match': {'dns.type': {'query': 'B', 'type': 'phrase'}}}}
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v7_phrases(self):
query_builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
kibana_saved_search_filter_phrase = {'$state': {'store': 'appState'},
'meta': {'alias': None, 'disabled': False, 'index': 'foo', 'key': 'dns.type',
'negate': False, 'params': ['A', 'PTR', 'AAAA'], 'type': 'phrases',
'value': 'A, PTR, AAAA'}, 'query': {'bool': {}}}
expected_result = [{'bool': {'minimum_should_match': 1, 'should':
[{'match_phrase': {'dns.type': 'A'}}, {'match_phrase': {'dns.type': 'PTR'}},
{'match_phrase': {'dns.type': 'AAAA'}}]}}]
# test
with mock.patch.object(query_builder, '_filter', new=kibana_saved_search_filter_phrase):
query_builder._setup_filter_mapping()
query_builder._factor_filter()
result = query_builder._filters[FILTER_GROUP_MUST]
self.assertEqual(result, expected_result)
result = query_builder._filters[FILTER_GROUP_MUST_NOT]
self.assertEqual(result, [])
# ----------------------------------------------------------------------
def test_filter_v7_phrases_not(self):
    """A negated v7 'phrases' filter lands in the MUST_NOT group."""
    builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'meta': {'alias': 'isnotoneof', 'disabled': False, 'index': 'foo',
                 'key': 'dns.type', 'negate': True,
                 'params': ['MX', 'NS'], 'type': 'phrases', 'value': 'MX, NS'},
        'query': {'bool': {}},
    }
    expected = [{'bool': {'minimum_should_match': 1, 'should': [
        {'match_phrase': {'dns.type': 'MX'}},
        {'match_phrase': {'dns.type': 'NS'}},
    ]}}]
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], expected)
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], [])
# ----------------------------------------------------------------------
def test_filter_v7_phrases_disabled(self):
    """A disabled v7 'phrases' filter must contribute to neither group."""
    builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'meta': {'alias': 'isnotoneof', 'disabled': True, 'index': 'foo',
                 'key': 'dns.type', 'negate': True,
                 'params': ['MX', 'NS'], 'type': 'phrases', 'value': 'MX, NS'},
        'query': {'bool': {}},
    }
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], [])
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], [])
# ----------------------------------------------------------------------
def test_filter_v7_exists(self):
    """A v7 'exists' filter is passed through into the MUST group."""
    builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'exists': {'field': 'dns.query'},
        'meta': {'alias': None, 'disabled': False, 'index': 'foo',
                 'key': 'dns.query', 'negate': False,
                 'type': 'exists', 'value': 'exists'},
    }
    expected = [{'exists': {'field': 'dns.query'}}]
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], expected)
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], [])
# ----------------------------------------------------------------------
def test_filter_v7_exists_not(self):
    """A negated v7 'exists' filter lands in the MUST_NOT group."""
    builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'exists': {'field': 'dns.query'},
        'meta': {'alias': None, 'disabled': False, 'index': 'foo',
                 'key': 'dns.query', 'negate': True,
                 'type': 'exists', 'value': 'exists'},
    }
    expected = [{'exists': {'field': 'dns.query'}}]
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], expected)
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], [])
# ----------------------------------------------------------------------
def test_filter_v7_exists_disabled(self):
    """A disabled v7 'exists' filter must contribute to neither group."""
    builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'exists': {'field': 'dns.query'},
        'meta': {'alias': None, 'disabled': True, 'index': 'foo',
                 'key': 'dns.query', 'negate': True,
                 'type': 'exists', 'value': 'exists'},
    }
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], [])
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], [])
# ----------------------------------------------------------------------
def test_filter_v7_range(self):
    """A v7 'range' filter is passed through into the MUST group."""
    builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'meta': {'alias': 'isbetween', 'disabled': False, 'index': 'foo',
                 'key': 'dns.client_ip', 'negate': False,
                 'params': {'gte': '0.0.0.0', 'lt': '255.255.255.255'},
                 'type': 'range', 'value': '0.0.0.0 to 255.255.255.255'},
        'range': {'dns.client_ip': {'gte': '0.0.0.0', 'lt': '255.255.255.255'}},
    }
    expected = [{'range': {'dns.client_ip':
                           {'gte': '0.0.0.0', 'lt': '255.255.255.255'}}}]
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], expected)
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], [])
# ----------------------------------------------------------------------
def test_filter_v7_range_not(self):
    """A negated v7 'range' filter lands in the MUST_NOT group."""
    builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'meta': {'alias': 'isbetween', 'disabled': False, 'index': 'foo',
                 'key': 'dns.client_ip', 'negate': True,
                 'params': {'gte': '0.0.0.0', 'lt': '255.255.255.255'},
                 'type': 'range', 'value': '0.0.0.0 to 255.255.255.255'},
        'range': {'dns.client_ip': {'gte': '0.0.0.0', 'lt': '255.255.255.255'}},
    }
    expected = [{'range': {'dns.client_ip':
                           {'gte': '0.0.0.0', 'lt': '255.255.255.255'}}}]
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], expected)
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], [])
# ----------------------------------------------------------------------
def test_filter_v7_range_disabled(self):
    """A disabled v7 'range' filter must contribute to neither group."""
    builder = self._factor_query_builder(ElasticSearch7QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'meta': {'alias': 'isbetween', 'disabled': True, 'index': 'foo',
                 'key': 'dns.client_ip', 'negate': False,
                 'params': {'gte': '0.0.0.0', 'lt': '255.255.255.255'},
                 'type': 'range', 'value': '0.0.0.0 to 255.255.255.255'},
        'range': {'dns.client_ip': {'gte': '0.0.0.0', 'lt': '255.255.255.255'}},
    }
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], [])
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], [])
# ----------------------------------------------------------------------
def test_filter_v4_empty(self):
    """An empty Kibana 4 filter produces no MUST or MUST_NOT entries."""
    builder = self._factor_query_builder(ElasticSearch2QueryBuilder)
    saved_filter = {}
    # factor the (empty) filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], [])
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], [])
# ----------------------------------------------------------------------
def test_filter_v4(self):
    """A Kibana 4 match filter is wrapped as {'query': ...} in the MUST group."""
    builder = self._factor_query_builder(ElasticSearch2QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'meta': {'alias': None, 'disabled': False, 'index': 'logstash-*',
                 'key': 'applicationName', 'negate': False, 'value': 'Webserver'},
        'query': {'match': {'applicationName': {'query': 'Webserver', 'type': 'phrase'}}},
    }
    expected = [{'query': saved_filter['query']}]
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], [])
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], expected)
# ----------------------------------------------------------------------
def test_filter_v4_not(self):
    """A negated Kibana 4 match filter lands in the MUST_NOT group."""
    builder = self._factor_query_builder(ElasticSearch2QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'meta': {'alias': None, 'disabled': False, 'index': 'logstash-*',
                 'key': 'applicationName', 'negate': True, 'value': 'Webserver'},
        'query': {'match': {'applicationName': {'query': 'Webserver', 'type': 'phrase'}}},
    }
    expected = [{'query': saved_filter['query']}]
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], expected)
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], [])
# ----------------------------------------------------------------------
def test_filter_v4_disabled(self):
    """A disabled Kibana 4 filter must contribute to neither group."""
    builder = self._factor_query_builder(ElasticSearch2QueryBuilder)
    saved_filter = {
        '$state': {'store': 'appState'},
        'meta': {'alias': None, 'disabled': True, 'index': 'logstash-*',
                 'key': 'applicationName', 'negate': True, 'value': 'Webserver'},
        'query': {'match': {'applicationName': {'query': 'Webserver', 'type': 'phrase'}}},
    }
    # factor the filter with the saved-search definition patched in
    with mock.patch.object(builder, '_filter', new=saved_filter):
        builder._setup_filter_mapping()
        builder._factor_filter()
    self.assertEqual(builder._filters[FILTER_GROUP_MUST_NOT], [])
    self.assertEqual(builder._filters[FILTER_GROUP_MUST], [])
| 54.821429
| 99
| 0.569877
| 3,191
| 32,235
| 5.403322
| 0.041993
| 0.153114
| 0.055678
| 0.089897
| 0.956154
| 0.954414
| 0.954182
| 0.954182
| 0.951978
| 0.94989
| 0
| 0.017323
| 0.221002
| 32,235
| 587
| 100
| 54.914821
| 0.669308
| 0.08134
| 0
| 0.881319
| 0
| 0
| 0.163553
| 0
| 0
| 0
| 0
| 0
| 0.136264
| 1
| 0.07033
| false
| 0
| 0.010989
| 0
| 0.085714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51c0e6bee2e820d7b01f2280b58a270ac9515f4c
| 14,005
|
py
|
Python
|
usaspending_api/search/tests/test_spending_by_award_type.py
|
truthiswill/usaspending-api
|
bd7d915442e2ec94cc830c480ceeffd4479be6c0
|
[
"CC0-1.0"
] | null | null | null |
usaspending_api/search/tests/test_spending_by_award_type.py
|
truthiswill/usaspending-api
|
bd7d915442e2ec94cc830c480ceeffd4479be6c0
|
[
"CC0-1.0"
] | 1
|
2021-11-15T17:53:27.000Z
|
2021-11-15T17:53:27.000Z
|
usaspending_api/search/tests/test_spending_by_award_type.py
|
truthiswill/usaspending-api
|
bd7d915442e2ec94cc830c480ceeffd4479be6c0
|
[
"CC0-1.0"
] | null | null | null |
import json
import pytest
from rest_framework import status
from usaspending_api.common.helpers.unit_test_helper import add_to_mock_objects
from usaspending_api.search.tests.test_mock_data_search import all_filters
from django_mock_queries.query import MockModel
@pytest.mark.django_db
def test_spending_by_award_type_success(client, refresh_matviews):
    """Valid award-type, full-filter and subaward payloads all return 200."""

    def post_json(path, payload):
        # helper: POST a JSON payload to the given endpoint
        return client.post(path, content_type='application/json',
                           data=json.dumps(payload))

    # small request
    resp = post_json('/api/v2/search/spending_by_award/', {
        "fields": ["Award ID", "Recipient Name"],
        "filters": {"award_type_codes": ["A", "B", "C"]},
    })
    assert resp.status_code == status.HTTP_200_OK

    # IDV award types
    resp = post_json('/api/v2/search/spending_by_award/', {
        "fields": ["Award ID", "Recipient Name"],
        "filters": {"award_type_codes": ["IDV_A", "IDV_B", "IDV_B_A", "IDV_B_B",
                                         "IDV_B_C", "IDV_C", "IDV_D", "IDV_E"]},
    })
    assert resp.status_code == status.HTTP_200_OK

    # all filters at once
    resp = post_json('/api/v2/search/spending_by_award', {
        "fields": ["Award ID", "Recipient Name"],
        "filters": all_filters(),
    })
    assert resp.status_code == status.HTTP_200_OK

    # subawards
    resp = post_json('/api/v2/search/spending_by_award', {
        "fields": ["Sub-Award ID"],
        "filters": all_filters(),
        "subawards": True,
    })
    assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_spending_by_award_type_failure(client, refresh_matviews):
    """Invalid payloads are rejected with 400 / 422."""

    def post_json(payload):
        # helper: POST a JSON payload to the endpoint
        return client.post('/api/v2/search/spending_by_award/',
                           content_type='application/json',
                           data=json.dumps(payload))

    # incomplete IDV award-type list -> bad request
    resp = post_json({
        "fields": ["Award ID", "Recipient Name"],
        "filters": {"award_type_codes": ["IDV_A", "IDV_B_A", "IDV_C", "IDV_D", "IDV_A_A"]},
    })
    assert resp.status_code == status.HTTP_400_BAD_REQUEST

    # missing required fields -> unprocessable entity
    resp = post_json({'filters': {}})
    assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_spending_by_award_pop_zip_filter(client, mock_matviews_qs):
    """ Test that filtering by pop zips works"""
    rows = [
        MockModel(pop_zip5="00501", pop_country_code='USA', award_id=1, piid=None,
                  fain='abc', uri=None, type='B', pulled_from="AWARD"),
        MockModel(pop_zip5="00502", pop_country_code='USA', award_id=2, piid=None,
                  fain='abd', uri=None, type='B', pulled_from="AWARD"),
        MockModel(pop_zip5="00503", pop_country_code='USA', award_id=3, piid=None,
                  fain='abe', uri=None, type='B', pulled_from="AWARD"),
    ]
    add_to_mock_objects(mock_matviews_qs, rows)

    def search(pop_locations):
        # helper: run the search with the given place-of-performance locations
        return client.post(
            '/api/v2/search/spending_by_award/',
            content_type='application/json',
            data=json.dumps({
                "fields": ["Place of Performance Zip5"],
                "filters": {
                    "award_type_codes": ["A", "B", "C", "D"],
                    "place_of_performance_locations": pop_locations,
                },
            }))

    # simple, single zip
    resp = search([{"country": "USA", "zip": "00501"}])
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00501'}

    # a zip with no matches must not remove the results of the first zip
    resp = search([{"country": "USA", "zip": "00501"},
                   {"country": "USA", "zip": "10000"}])
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00501'}

    # two valid zips -> two results
    resp = search([{"country": "USA", "zip": "00501"},
                   {"country": "USA", "zip": "00502"}])
    possible_results = ({'internal_id': 1, 'Place of Performance Zip5': '00501'},
                        {'internal_id': 2, 'Place of Performance Zip5': '00502'})
    assert len(resp.data['results']) == 2
    assert resp.data['results'][0] in possible_results
    assert resp.data['results'][1] in possible_results
    # make sure the same row is not returned twice
    assert resp.data['results'][0] != resp.data['results'][1]
@pytest.mark.django_db
def test_spending_by_award_recipient_zip_filter(client, mock_matviews_qs):
    """ Test that filtering by recipient zips works"""
    rows = [
        MockModel(recipient_location_zip5="00501", recipient_location_country_code='USA',
                  pop_zip5='00001', award_id=1, piid=None, fain='abc', uri=None,
                  type='B', pulled_from="AWARD"),
        MockModel(recipient_location_zip5="00502", recipient_location_country_code='USA',
                  pop_zip5='00002', award_id=2, piid=None, fain='abd', uri=None,
                  type='B', pulled_from="AWARD"),
        MockModel(recipient_location_zip5="00503", recipient_location_country_code='USA',
                  pop_zip5='00003', award_id=3, piid=None, fain='abe', uri=None,
                  type='B', pulled_from="AWARD"),
    ]
    add_to_mock_objects(mock_matviews_qs, rows)

    def search(recipient_locations):
        # helper: run the search with the given recipient locations
        return client.post(
            '/api/v2/search/spending_by_award/',
            content_type='application/json',
            data=json.dumps({
                "fields": ["Place of Performance Zip5"],
                "filters": {
                    "award_type_codes": ["A", "B", "C", "D"],
                    "recipient_locations": recipient_locations,
                },
            }))

    # simple, single zip
    resp = search([{"country": "USA", "zip": "00501"}])
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}

    # a zip with no matches must not remove the results of the first zip
    resp = search([{"country": "USA", "zip": "00501"},
                   {"country": "USA", "zip": "10000"}])
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}

    # two valid zips -> two results
    resp = search([{"country": "USA", "zip": "00501"},
                   {"country": "USA", "zip": "00502"}])
    possible_results = ({'internal_id': 1, 'Place of Performance Zip5': '00001'},
                        {'internal_id': 2, 'Place of Performance Zip5': '00002'})
    assert len(resp.data['results']) == 2
    assert resp.data['results'][0] in possible_results
    assert resp.data['results'][1] in possible_results
    # make sure the same row is not returned twice
    assert resp.data['results'][0] != resp.data['results'][1]
@pytest.mark.django_db
def test_spending_by_award_both_zip_filter(client, mock_matviews_qs):
    """ Test that filtering by both kinds of zips works"""
    rows = [
        MockModel(recipient_location_zip5="00501", recipient_location_country_code='USA',
                  pop_zip5='00001', pop_country_code='USA', award_id=1, piid=None,
                  fain='abc', uri=None, type='B', pulled_from="AWARD"),
        MockModel(recipient_location_zip5="00502", recipient_location_country_code='USA',
                  pop_zip5='00002', pop_country_code='USA', award_id=2, piid=None,
                  fain='abd', uri=None, type='B', pulled_from="AWARD"),
        MockModel(recipient_location_zip5="00503", recipient_location_country_code='USA',
                  pop_zip5='00003', pop_country_code='USA', award_id=3, piid=None,
                  fain='abe', uri=None, type='B', pulled_from="AWARD"),
    ]
    add_to_mock_objects(mock_matviews_qs, rows)

    def search(recipient_locations, pop_locations):
        # helper: run the search constrained by both location filters
        return client.post(
            '/api/v2/search/spending_by_award/',
            content_type='application/json',
            data=json.dumps({
                "fields": ["Place of Performance Zip5"],
                "filters": {
                    "award_type_codes": ["A", "B", "C", "D"],
                    "recipient_locations": recipient_locations,
                    "place_of_performance_locations": pop_locations,
                },
            }))

    # single pair of zips that both match
    resp = search([{"country": "USA", "zip": "00501"}],
                  [{"country": "USA", "zip": "00001"}])
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}

    # single pair of zips that don't match
    resp = search([{"country": "USA", "zip": "00501"}],
                  [{"country": "USA", "zip": "00002"}])
    assert len(resp.data['results']) == 0

    # two pairs (only one pair can be made from this)
    resp = search([{"country": "USA", "zip": "00501"},
                   {"country": "USA", "zip": "00502"}],
                  [{"country": "USA", "zip": "00001"},
                   {"country": "USA", "zip": "00003"}])
    assert len(resp.data['results']) == 1
    assert resp.data['results'][0] == {'internal_id': 1, 'Place of Performance Zip5': '00001'}
@pytest.mark.django_db
def test_spending_by_award_foreign_filter(client, mock_matviews_qs):
    """ Verify that foreign country filter is returning the correct results """
    rows = [
        MockModel(award_id=0, piid=None, fain='aaa', uri=None, type='B', pulled_from="AWARD",
                  recipient_location_country_name="UNITED STATES",
                  recipient_location_country_code="USA"),
        MockModel(award_id=1, piid=None, fain='abc', uri=None, type='B', pulled_from="AWARD",
                  recipient_location_country_name="",
                  recipient_location_country_code="USA"),
        MockModel(award_id=2, piid=None, fain='abd', uri=None, type='B', pulled_from="AWARD",
                  recipient_location_country_name="UNITED STATES",
                  recipient_location_country_code=""),
        MockModel(award_id=3, piid=None, fain='abe', uri=None, type='B', pulled_from="AWARD",
                  recipient_location_country_name="Gibraltar",
                  recipient_location_country_code="GIB"),
    ]
    add_to_mock_objects(mock_matviews_qs, rows)

    # Three results are returned when searching for "USA"-based recipients
    # e.g. "USA"; "UNITED STATES"; "USA" and "UNITED STATES";
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_scope": "domestic"
            },
            "fields": ["Award ID"]
        }))
    assert len(resp.data['results']) == 3

    # One result is returned when searching for "Foreign" recipients
    resp = client.post(
        '/api/v2/search/spending_by_award/',
        content_type='application/json',
        data=json.dumps({
            "filters": {
                "award_type_codes": ["A", "B", "C", "D"],
                "recipient_scope": "foreign"
            },
            "fields": ["Award ID"],
        }))
    assert len(resp.data['results']) == 1
| 45.470779
| 118
| 0.581078
| 1,684
| 14,005
| 4.590855
| 0.104513
| 0.032596
| 0.058207
| 0.054068
| 0.870133
| 0.86082
| 0.845686
| 0.820075
| 0.807787
| 0.77338
| 0
| 0.035631
| 0.268547
| 14,005
| 307
| 119
| 45.618893
| 0.719055
| 0.089397
| 0
| 0.714859
| 0
| 0
| 0.256709
| 0.058157
| 0
| 0
| 0
| 0
| 0.116466
| 1
| 0.024096
| false
| 0
| 0.024096
| 0
| 0.048193
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51f9ad82857168c4520d2aadbf7b5b494f03b156
| 112
|
py
|
Python
|
RecoBTag/PerformanceDB/python/PoolBTagPerformanceDBMC36X.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoBTag/PerformanceDB/python/PoolBTagPerformanceDBMC36X.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoBTag/PerformanceDB/python/PoolBTagPerformanceDBMC36X.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
from RecoBTag.PerformanceDB.measure.Pool_pf36 import *
from RecoBTag.PerformanceDB.measure.Pool_calo36 import *
| 37.333333
| 56
| 0.857143
| 14
| 112
| 6.714286
| 0.571429
| 0.255319
| 0.531915
| 0.680851
| 0.765957
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.071429
| 112
| 2
| 57
| 56
| 0.865385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
cfef6c8edf9412b85b976ae34ca7bf714c4a65f3
| 767
|
py
|
Python
|
config.py
|
Taraxa-project/taraxa-py
|
95aa0d8054bf4eba2c3200f3298421575b7bb5a0
|
[
"MIT"
] | null | null | null |
config.py
|
Taraxa-project/taraxa-py
|
95aa0d8054bf4eba2c3200f3298421575b7bb5a0
|
[
"MIT"
] | 1
|
2022-03-02T15:51:17.000Z
|
2022-03-02T15:51:17.000Z
|
config.py
|
Taraxa-project/taraxa-py
|
95aa0d8054bf4eba2c3200f3298421575b7bb5a0
|
[
"MIT"
] | null | null | null |
# Default gas limit and gas price applied to transactions sent by this tool.
GAS = 7000000
GASPRICE = 10000000
# Boot-node account credentials (key pair plus derived address).
# NOTE(review): private keys are committed to source control here — presumably
# throwaway test-net accounts; confirm these never hold real funds.
boot_privateKey = "0x3800b2875669d9b2053c1aff9224ecfdc411423aac5b5a73d7a45ced1c3b9dcd"
boot_publicKey = "0x7b1fcf0ec1078320117b96e9e9ad9032c06d030cf4024a598347a4623a14a421d4f" \
                 "030cf25ef368ab394a45e920e14b57a259a09c41767dd50d1da27b627412a"
boot_address = "0xde2b1203d72d3549ee2f733b00b2789414c7cea5"
# Primary account used for sending transactions.
privateKey = "0x5076e3eae916b0c68b72a514a67fd089c643c2f306462bb64a99155bfb26757d"
publicKey = "0x45df55840c79080d0f6fa445a6ec81a758e9b8df80083b1970c661e096000fce586ae389b58e1b9c6b5346bd912bccf656f84c1506b47681a63e0997c610c99b"
address = "0x07162012099a6c3d44b264cd70aa9f390a26a0f3"
# Secondary account (e.g. transfer counterparty).
privateKey_2 = '0x5f63bb17f902989d5a354f7048fd1bcd13e7e76e6b228918c0a61458d5c1206a'
address_2 = '0xa16A181AD474C82D8753eB0C10e8DD4e5710314f'
| 47.9375
| 144
| 0.910039
| 26
| 767
| 26.653846
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.551582
| 0.052151
| 767
| 15
| 145
| 51.133333
| 0.401651
| 0
| 0
| 0
| 0
| 0
| 0.762402
| 0.762402
| 0
| 1
| 0.682768
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5c9e1b098e40687a792405f6e4190533e758e4b0
| 16,548
|
py
|
Python
|
src/python/Somatic/Mutect.py
|
Steven-N-Hart/hap.py
|
d418d4fda2dc17533bb4313ec0acda4c01884b37
|
[
"BSL-1.0"
] | null | null | null |
src/python/Somatic/Mutect.py
|
Steven-N-Hart/hap.py
|
d418d4fda2dc17533bb4313ec0acda4c01884b37
|
[
"BSL-1.0"
] | null | null | null |
src/python/Somatic/Mutect.py
|
Steven-N-Hart/hap.py
|
d418d4fda2dc17533bb4313ec0acda4c01884b37
|
[
"BSL-1.0"
] | null | null | null |
# coding=utf-8
#
# Copyright (c) 2010-2015 Illumina, Inc.
# All rights reserved.
#
# This file is distributed under the simplified BSD license.
# The full text can be found here (and in LICENSE.txt in the root folder of
# this distribution):
#
# https://github.com/Illumina/licenses/blob/master/Simplified-BSD-License.txt
"""
Date: 2/10/2015
Author: Peter Krusche <pkrusche@illumina.com>
"""
import pandas
import logging
import re
from Tools.vcfextract import vcfExtract, extractHeadersJSON
def extractMutectSNVFeatures(vcfname, tag, avg_depth=None):
    """ Return a data frame with features collected from the given VCF, tagged by given type

    :param vcfname: path to the Mutect VCF to read (passed to vcfExtract)
    :param tag: label written into the "tag" column of every output row
    :param avg_depth: optional dict mapping chromosome name -> average depth,
                      used to compute N_DP_RATE / T_DP_RATE (left at 0 when absent)
    :return: pandas.DataFrame with one row per VCF record
    """
    records = []

    if not avg_depth:
        # without per-chromosome averages the *_DP_RATE features stay at 0
        logging.warning("No average depths available, normalized depth features cannot be calculated")

    hdrs = extractHeadersJSON(vcfname)

    tsn = ""
    nsn = ""

    # default sample-column prefixes: tumour is sample 1, normal is sample 2
    t_sample = "S.1."
    n_sample = "S.2."
    try:
        samples = hdrs["samples"]
        for f in hdrs["fields"]:
            if f["key"] == "GATKCommandLine" and f["values"]["ID"].lower() == "mutect":
                clopts = f["values"]["CommandLineOptions"]
                # ... tumor_sample_name=HCC2218_tumour ... normal_sample_name=HCC2218_normal
                m = re.search(r"tumor_sample_name=([^\s]+)", clopts)
                if m:
                    tsn = m.group(1)
                    for i, x in enumerate(samples):
                        if x == tsn:
                            t_sample = "S.%i." % (i+1)
                            break
                m = re.search(r"normal_sample_name=([^\s]+)", clopts)
                if m:
                    nsn = m.group(1)
                    for i, x in enumerate(samples):
                        if x == nsn:
                            n_sample = "S.%i." % (i+1)
                            break
    except Exception:
        # header may be absent or malformed; fall back to the default sample order
        logging.warning("Unable to detect tumour / normal sample order from VCF header")

    logging.info("Normal sample name : %s (prefix %s) / tumour sample name : %s (prefix %s)" % (nsn, n_sample,
                                                                                                tsn, t_sample))

    features = ["CHROM", "POS", "REF", "ALT", "FILTER",
                "I.DB", "I.TLOD", "I.NLOD", "I.ECNT",
                "I.HCNT", "I.MAX_ED", "I.MIN_ED",
                n_sample + "GT", t_sample + "GT",
                n_sample + "DP", t_sample + "DP",
                n_sample + "QSS", t_sample + "QSS",
                n_sample + "AD", t_sample + "AD"]

    has_warned = {}
    for vr in vcfExtract(vcfname, features):
        rec = {}
        for i, ff in enumerate(features):
            rec[ff] = vr[i]

        # GT features default to "." when absent
        for q in [n_sample + "GT", t_sample + "GT"]:
            if q not in rec or rec[q] is None:
                rec[q] = "."
                if ("feat:" + q) not in has_warned:
                    logging.warning("Missing feature %s" % q)
                    has_warned["feat:" + q] = True

        # fix missing features
        for q in ["I.DB", "I.TLOD", "I.NLOD", "I.ECNT",
                  "I.HCNT", "I.MAX_ED", "I.MIN_ED",
                  n_sample + "GT", t_sample + "GT",
                  n_sample + "DP", t_sample + "DP",
                  n_sample + "QSS", t_sample + "QSS",
                  n_sample + "AD", t_sample + "AD"]:
            if q not in rec or rec[q] is None:
                rec[q] = 0
                if ("feat:" + q) not in has_warned:
                    logging.warning("Missing feature %s" % q)
                    has_warned["feat:" + q] = True
            else:
                # list features: one value per allele (ref + alts)
                if q.endswith("AD") or q.endswith("QSS"):
                    if type(rec[q]) is not list:
                        if q + "_PARSE_FAIL" not in has_warned:
                            logging.warning("Cannot parse %s: %s" % (q, str(rec[q])))
                            has_warned[q + "_PARSE_FAIL"] = True
                        rec[q] = [0] * (1 + len(rec["ALT"]))

                    # pad / coerce every allele slot to a float
                    for xx in range(0, 1 + len(rec["ALT"])):
                        if len(rec[q]) <= xx:
                            rec[q].append(0)
                        else:
                            try:
                                rec[q][xx] = float(rec[q][xx])
                            except ValueError:
                                rec[q][xx] = 0
                else:
                    try:
                        rec[q] = int(rec[q])
                    except ValueError:
                        rec[q] = -1

        rec["tag"] = tag
        TLOD = float(rec["I.TLOD"])
        NLOD = float(rec["I.NLOD"])

        n_DP = float(rec[n_sample + "DP"])
        t_DP = float(rec[t_sample + "DP"])
        n_DP_ratio = 0
        t_DP_ratio = 0

        if avg_depth:
            if rec["CHROM"] in avg_depth:
                n_DP_ratio = n_DP / float(avg_depth[rec["CHROM"]])
                t_DP_ratio = t_DP / float(avg_depth[rec["CHROM"]])
            elif rec["CHROM"] not in has_warned:
                logging.warning("Cannot normalize depths on %s" % rec["CHROM"])
                has_warned[rec["CHROM"]] = True
        elif "DPnorm" not in has_warned:
            logging.warning("Cannot normalize depths.")
            has_warned["DPnorm"] = True

        # normal-sample allele fraction; AD[0] is the ref count, AD[i+1] the i-th alt
        n_allele_ref_count = rec[n_sample + "AD"][0]
        alleles_alt = rec["ALT"]
        if alleles_alt == ['.']:
            n_allele_alt_count = 0
        else:
            n_allele_alt_count = 0
            for a in range(0, len(alleles_alt)):
                n_allele_alt_count += float(rec[n_sample + "AD"][a + 1])

        if n_allele_alt_count + n_allele_ref_count == 0:
            n_allele_rate = 0
        else:
            n_allele_rate = n_allele_alt_count / float(n_allele_alt_count + n_allele_ref_count)

        # tumour-sample allele fraction, same scheme
        t_allele_ref_count = rec[t_sample + "AD"][0]
        alleles_alt = rec["ALT"]
        if alleles_alt == ['.']:
            t_allele_alt_count = 0
        else:
            t_allele_alt_count = 0
            for a in range(0, len(alleles_alt)):
                t_allele_alt_count += float(rec[t_sample + "AD"][a + 1])

        if t_allele_alt_count + t_allele_ref_count == 0:
            t_allele_rate = 0
        else:
            t_allele_rate = t_allele_alt_count / float(t_allele_alt_count + t_allele_ref_count)

        # Gather the computed data into a dict
        qrec = {
            "CHROM": rec["CHROM"],
            "POS": int(rec["POS"]),
            "REF": rec["REF"],
            "ALT": ",".join(rec["ALT"]),
            "FILTER": ",".join(rec["FILTER"]),
            "DBSNP": rec["I.DB"],
            "TLOD": TLOD,
            "NLOD": NLOD,
            "N_DP": n_DP,
            "T_DP": t_DP,
            "N_DP_RATE" : n_DP_ratio,
            "T_DP_RATE" : t_DP_ratio,
            "N_GT": rec[n_sample + "GT"],
            "T_GT": rec[t_sample + "GT"],
            "N_AD": rec[n_sample + "AD"],
            "T_AD": rec[t_sample + "AD"],
            "N_QSS": rec[n_sample + "QSS"],
            "T_QSS": rec[t_sample + "QSS"],
            "N_AF": n_allele_rate,
            "T_AF": t_allele_rate,
            "ECNT": rec["I.ECNT"],
            "HCNT": rec["I.HCNT"],
            "MAX_ED": rec["I.MAX_ED"],
            "MIN_ED": rec["I.MIN_ED"],
            "tag" : tag
        }
        records.append(qrec)

    cols = ["CHROM", "POS", "REF", "ALT",
            "FILTER", "TLOD", "NLOD", "DBSNP",
            "N_DP", "T_DP", "N_DP_RATE", "T_DP_RATE", "N_GT", "T_GT",
            "N_AD", "T_AD", "N_QSS", "T_QSS",
            "N_AF", "T_AF",
            "tag"]

    if records:
        df = pandas.DataFrame(records, columns=cols)
    else:
        df = pandas.DataFrame(columns=cols)

    return df
def extractMutectIndelFeatures(vcfname, tag, avg_depth=None):
    """Return a data frame with Mutect indel features collected from the given VCF.

    :param vcfname: path of the VCF file to extract features from
    :param tag: type tag stored in the "tag" column of every record
    :param avg_depth: optional dict mapping chromosome name -> average depth,
                      used to compute the normalised depth rates (which stay 0
                      when no average depth is available)
    """
    records = []

    if not avg_depth:
        logging.warning("No average depths available, normalized depth features cannot be calculated")

    hdrs = extractHeadersJSON(vcfname)

    tsn = ""
    nsn = ""
    # Default sample-column prefixes, used when the header gives no better information.
    t_sample = "S.1."
    n_sample = "S.2."
    try:
        # Detect tumour / normal sample order from the Mutect command line
        # recorded in the GATKCommandLine header field, e.g.
        # ... tumor_sample_name=HCC2218_tumour ... normal_sample_name=HCC2218_normal
        samples = hdrs["samples"]
        for f in hdrs["fields"]:
            if f["key"] == "GATKCommandLine" and f["values"]["ID"].lower() == "mutect":
                clopts = f["values"]["CommandLineOptions"]
                m = re.search(r"tumor_sample_name=([^\s]+)", clopts)
                if m:
                    tsn = m.group(1)
                    for i, x in enumerate(samples):
                        if x == tsn:
                            t_sample = "S.%i." % (i + 1)
                            break
                m = re.search(r"normal_sample_name=([^\s]+)", clopts)
                if m:
                    nsn = m.group(1)
                    for i, x in enumerate(samples):
                        if x == nsn:
                            n_sample = "S.%i." % (i + 1)
                            break
    except Exception:
        logging.warning("Unable to detect tumour / normal sample order from VCF header")

    logging.info("Normal sample name : %s (prefix %s) / tumour sample name : %s (prefix %s)" % (nsn, n_sample,
                                                                                                tsn, t_sample))

    features = ["CHROM", "POS", "REF", "ALT", "FILTER",
                "I.DB", "I.TLOD", "I.NLOD", "I.ECNT",
                "I.HCNT", "I.MAX_ED", "I.MIN_ED",
                "I.RPA", "I.RU",  # indel only
                n_sample + "GT", t_sample + "GT",
                n_sample + "DP", t_sample + "DP",
                n_sample + "QSS", t_sample + "QSS",
                n_sample + "AD", t_sample + "AD"]

    has_warned = {}

    def _allele_counts(rec, prefix):
        """Return (ref_count, alt_count, alt_rate) from one sample's AD field.

        The first AD entry is the REF depth; subsequent entries are one per
        ALT allele. The rate is 0 when both counts are 0.
        """
        ad = rec[prefix + "AD"]
        ref_count = ad[0]
        alts = rec["ALT"]
        if alts == ['.']:
            alt_count = 0
        else:
            alt_count = sum(float(ad[a + 1]) for a in range(len(alts)))
        if alt_count + ref_count == 0:
            rate = 0
        else:
            rate = alt_count / float(alt_count + ref_count)
        return ref_count, alt_count, rate

    for vr in vcfExtract(vcfname, features):
        rec = {}
        for i, ff in enumerate(features):
            rec[ff] = vr[i]

        # GT features default to "." when missing (unlike the numeric features below).
        for q in [n_sample + "GT", t_sample + "GT"]:
            if q not in rec or rec[q] is None:
                rec[q] = "."
                if ("feat:" + q) not in has_warned:
                    logging.warning("Missing feature %s" % q)
                    has_warned["feat:" + q] = True

        # fix missing features
        for q in ["I.DB", "I.TLOD", "I.NLOD", "I.ECNT",
                  "I.HCNT", "I.MAX_ED", "I.MIN_ED",
                  "I.RPA", "I.RU",
                  n_sample + "GT", t_sample + "GT",
                  n_sample + "DP", t_sample + "DP",
                  n_sample + "QSS", t_sample + "QSS",
                  n_sample + "AD", t_sample + "AD"]:
            if q not in rec or rec[q] is None:
                rec[q] = 0
                if ("feat:" + q) not in has_warned:
                    logging.warning("Missing feature %s" % q)
                    has_warned["feat:" + q] = True
            else:
                # list features: one entry for REF plus one per ALT allele
                if q.endswith("AD") or q.endswith("QSS") or q.endswith("RPA"):
                    if type(rec[q]) is not list:
                        if q + "_PARSE_FAIL" not in has_warned:
                            logging.warning("Cannot parse %s: %s" % (q, str(rec[q])))
                            has_warned[q + "_PARSE_FAIL"] = True
                        rec[q] = [0] * (1 + len(rec["ALT"]))

                    # Pad to 1 + #ALT entries and coerce every entry to float.
                    for xx in range(0, 1 + len(rec["ALT"])):
                        if len(rec[q]) <= xx:
                            rec[q].append(0)
                        else:
                            try:
                                rec[q][xx] = float(rec[q][xx])
                            except ValueError:
                                rec[q][xx] = 0
                else:
                    try:
                        rec[q] = int(rec[q])
                    except ValueError:
                        rec[q] = -1

        rec["tag"] = tag

        TLOD = float(rec["I.TLOD"])
        NLOD = float(rec["I.NLOD"])

        n_DP = float(rec[n_sample + "DP"])
        t_DP = float(rec[t_sample + "DP"])
        n_DP_ratio = 0
        t_DP_ratio = 0

        if avg_depth:
            if rec["CHROM"] in avg_depth:
                n_DP_ratio = n_DP / float(avg_depth[rec["CHROM"]])
                t_DP_ratio = t_DP / float(avg_depth[rec["CHROM"]])
            elif rec["CHROM"] not in has_warned:
                logging.warning("Cannot normalize depths on %s" % rec["CHROM"])
                has_warned[rec["CHROM"]] = True
        elif "DPnorm" not in has_warned:
            logging.warning("Cannot normalize depths.")
            has_warned["DPnorm"] = True

        _n_ref, _n_alt, n_allele_rate = _allele_counts(rec, n_sample)
        _t_ref, _t_alt, t_allele_rate = _allele_counts(rec, t_sample)

        # Gather the computed data into a dict
        qrec = {
            "CHROM": rec["CHROM"],
            "POS": int(rec["POS"]),
            "REF": rec["REF"],
            "ALT": ",".join(rec["ALT"]),
            "FILTER": ",".join(rec["FILTER"]),
            "DBSNP": rec["I.DB"],
            "TLOD": TLOD,
            "NLOD": NLOD,
            "N_DP": n_DP,
            "T_DP": t_DP,
            "N_DP_RATE": n_DP_ratio,
            "T_DP_RATE": t_DP_ratio,
            "N_GT": rec[n_sample + "GT"],
            "T_GT": rec[t_sample + "GT"],
            "N_AD": rec[n_sample + "AD"],
            "T_AD": rec[t_sample + "AD"],
            "N_QSS": rec[n_sample + "QSS"],
            "T_QSS": rec[t_sample + "QSS"],
            "N_AF": n_allele_rate,
            "T_AF": t_allele_rate,
            "ECNT": rec["I.ECNT"],
            "HCNT": rec["I.HCNT"],
            "MAX_ED": rec["I.MAX_ED"],
            "MIN_ED": rec["I.MIN_ED"],
            "I.RPA": rec["I.RPA"],
            "I.RU": rec["I.RU"],
            "tag": tag
        }
        records.append(qrec)

    # NOTE: ECNT/HCNT/MAX_ED/MIN_ED and the indel-only I.RPA / I.RU values are
    # collected into qrec but not part of the returned column set (kept as-is
    # to preserve the historical output schema).
    cols = ["CHROM", "POS", "REF", "ALT",
            "FILTER", "TLOD", "NLOD", "DBSNP",
            "N_DP", "T_DP", "N_DP_RATE", "T_DP_RATE", "N_GT", "T_GT",
            "N_AD", "T_AD", "N_QSS", "T_QSS",
            "N_AF", "T_AF",
            "tag"]

    if records:
        df = pandas.DataFrame(records, columns=cols)
    else:
        df = pandas.DataFrame(columns=cols)

    return df
| 39.971014
| 115
| 0.415518
| 1,864
| 16,548
| 3.48176
| 0.10515
| 0.038829
| 0.051772
| 0.027735
| 0.927581
| 0.926965
| 0.926965
| 0.926965
| 0.926965
| 0.926965
| 0
| 0.010212
| 0.455584
| 16,548
| 413
| 116
| 40.067797
| 0.710179
| 0.050943
| 0
| 0.963636
| 0
| 0.006061
| 0.132189
| 0.006766
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006061
| false
| 0
| 0.012121
| 0
| 0.024242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5cacbaf9c7aa1b765fa82a93f8877d36cd75aa12
| 18,452
|
py
|
Python
|
src/datamigration/azext_datamigration/tests/latest/example_steps.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | null | null | null |
src/datamigration/azext_datamigration/tests/latest/example_steps.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 9
|
2022-03-25T19:35:49.000Z
|
2022-03-31T06:09:47.000Z
|
src/datamigration/azext_datamigration/tests/latest/example_steps.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 1
|
2022-03-10T22:13:02.000Z
|
2022-03-10T22:13:02.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .. import try_manual
@try_manual
def step_sql_service_create(test, checks=None):
    """Example: /SqlMigrationServices/put/Create or Update SQL Migration Service with maximum parameters."""
    checks = [] if checks is None else checks
    # The create itself is unchecked; assertions run against the wait below.
    test.cmd('az datamigration sql-service create '
             '--location "northeurope" '
             '--resource-group "{rg}" '
             '--name "{mySqlMigrationService}"',
             checks=[])
    test.cmd('az datamigration sql-service wait --created '
             '--resource-group "{rg}" '
             '--name "{mySqlMigrationService}"',
             checks=checks)
@try_manual
def step_sql_service_create2(test, checks=None):
    """Example: /SqlMigrationServices/put/Create or Update SQL Migration Service with minimum parameters.

    Delegates to step_sql_service_create; the generated wait command that
    followed the return statement was unreachable and has been removed.
    """
    return step_sql_service_create(test, checks)
@try_manual
def step_sql_service_show(test, checks=None):
    """Example: /SqlMigrationServices/get/Get Migration Service."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-service show '
             '--resource-group "{rg}" '
             '--name "{mySqlMigrationService2}"',
             checks=checks)
@try_manual
def step_sql_service_list(test, checks=None):
    """Example: /SqlMigrationServices/get/Get Migration Services in the Resource Group."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-service list '
             '--resource-group "{rg}"',
             checks=checks)
@try_manual
def step_sql_service_list2(test, checks=None):
    """Example: /SqlMigrationServices/get/Get Services in the Subscriptions."""
    checks = [] if checks is None else checks
    # Empty resource group lists services across the whole subscription.
    test.cmd('az datamigration sql-service list '
             '-g ""',
             checks=checks)
@try_manual
def step_sql_service_list_migration(test, checks=None):
    """Example: /SqlMigrationServices/get/List database migrations attached to the service."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-service list-migration '
             '--resource-group "{rg}" '
             '--name "{mySqlMigrationService2}"',
             checks=checks)
@try_manual
def step_sql_service_update(test, checks=None):
    """Example: /SqlMigrationServices/patch/Update SQL Migration Service."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-service update '
             '--tags mytag="myval" '
             '--resource-group "{rg}" '
             '--name "{mySqlMigrationService}"',
             checks=checks)
@try_manual
def step_sql_service_delete_node(test, checks=None):
    """Example: /SqlMigrationServices/post/Delete the integration runtime node."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-service delete-node '
             '--ir-name "IRName" '
             '--node-name "nodeName" '
             '--resource-group "{rg}" '
             '--name "{mySqlMigrationService2}"',
             checks=checks)
@try_manual
def step_sql_service_regenerate_auth_key(test, checks=None):
    """Example: /SqlMigrationServices/post/Regenerate the of Authentication Keys."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-service regenerate-auth-key '
             '--key-name "authKey1" '
             '--resource-group "{rg}" '
             '--name "{mySqlMigrationService2}"',
             checks=checks)
@try_manual
def step_sql_service_list_auth_key(test, checks=None):
    """Example: /SqlMigrationServices/post/Retrieve the List of Authentication Keys."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-service list-auth-key '
             '--resource-group "{rg}" '
             '--name "{mySqlMigrationService2}"',
             checks=checks)
@try_manual
def step_sql_service_list_integration_runtime_metric(test, checks=None):
    """Example: /SqlMigrationServices/post/Retrieve the Monitoring Data."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-service list-integration-runtime-metric '
             '--resource-group "{rg}" '
             '--name "{mySqlMigrationService2}"',
             checks=checks)
@try_manual
def step_sql_db_create(test, checks=None):
    """Example: /DatabaseMigrationsSqlDb/put/Create or Update Database Migration resource with Maximum parameters."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-db create '
             '--migration-service "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.DataMigrati'
             'on/sqlMigrationServices/{mySqlMigrationService}" '
             '--scope "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Sql/servers/sqldbinstan'
             'ce" '
             '--source-database-name "aaa" '
             '--source-sql-connection authentication="WindowsAuthentication" data-source="aaa" encrypt-connection=true '
             'password="placeholder" trust-server-certificate=true user-name="bbb" '
             '--table-list "[Schema1].[TableName1]" "[Schema2].[TableName2]" '
             '--target-sql-connection authentication="SqlAuthentication" data-source="sqldbinstance" '
             'encrypt-connection=true password="placeholder" trust-server-certificate=true user-name="bbb" '
             '--resource-group "{rg}" '
             '--sqldb-instance-name "sqldbinstance" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_db_create2(test, checks=None):
    """Example: /DatabaseMigrationsSqlDb/put/Create or Update Database Migration resource with Minimum parameters."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-db create '
             '--migration-service "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.DataMigrati'
             'on/sqlMigrationServices/{mySqlMigrationService}" '
             '--scope "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Sql/servers/sqldbinstan'
             'ce" '
             '--source-database-name "aaa" '
             '--source-sql-connection authentication="WindowsAuthentication" data-source="aaa" encrypt-connection=true '
             'password="placeholder" trust-server-certificate=true user-name="bbb" '
             '--target-sql-connection authentication="SqlAuthentication" data-source="sqldbinstance" '
             'encrypt-connection=true password="placeholder" trust-server-certificate=true user-name="bbb" '
             '--resource-group "{rg}" '
             '--sqldb-instance-name "sqldbinstance" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_db_show(test, checks=None):
    """Example: /DatabaseMigrationsSqlDb/get/Get Sql DB database Migration with the expand parameter."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-db show '
             '--expand "MigrationStatusDetails" '
             '--resource-group "{rg}" '
             '--sqldb-instance-name "sqldbinstance" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_db_show2(test, checks=None):
    """Example: /DatabaseMigrationsSqlDb/get/Get Sql DB database Migration without the expand parameter."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-db show '
             '--resource-group "{rg}" '
             '--sqldb-instance-name "sqldbinstance" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_db_cancel(test, checks=None):
    """Example: /DatabaseMigrationsSqlDb/post/Stop ongoing migration for the database."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-db cancel '
             '--migration-operation-id "9a90bb84-e70f-46f7-b0ae-1aef5b3b9f07" '
             '--resource-group "{rg}" '
             '--sqldb-instance-name "sqldbinstance" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_db_delete(test, checks=None):
    """Example: /DatabaseMigrationsSqlDb/delete/Delete Database Migration resource."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-db delete -y '
             '--resource-group "{rg}" '
             '--sqldb-instance-name "sqldbinstance" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_managed_instance_create(test, checks=None):
    """Example: /DatabaseMigrationsSqlMi/put/Create or Update Database Migration resource with Maximum parameters."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-managed-instance create '
             '--managed-instance-name "managedInstance1" '
             '--source-location "{{\\"fileShare\\":{{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\bbb\\\\\\\\ccc\\",\\"password\\'
             '":\\"placeholder\\",\\"username\\":\\"name\\"}}}}" '
             '--target-location account-key="abcd" storage-account-resource-id="account.database.windows.net" '
             '--migration-service "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.DataMigrati'
             'on/sqlMigrationServices/{mySqlMigrationService}" '
             '--offline-configuration last-backup-name="last_backup_file_name" offline=true '
             '--scope "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Sql/managedInstances/in'
             'stance" '
             '--source-database-name "aaa" '
             '--source-sql-connection authentication="WindowsAuthentication" data-source="aaa" encrypt-connection=true '
             'password="placeholder" trust-server-certificate=true user-name="bbb" '
             '--resource-group "{rg}" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_managed_instance_create2(test, checks=None):
    """Example: /DatabaseMigrationsSqlMi/put/Create or Update Database Migration resource with Minimum parameters."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-managed-instance create '
             '--managed-instance-name "managedInstance1" '
             '--source-location "{{\\"fileShare\\":{{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\bbb\\\\\\\\ccc\\",\\"password\\'
             '":\\"placeholder\\",\\"username\\":\\"name\\"}}}}" '
             '--target-location account-key="abcd" storage-account-resource-id="account.database.windows.net" '
             '--migration-service "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.DataMigrati'
             'on/sqlMigrationServices/{mySqlMigrationService}" '
             '--scope "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Sql/managedInstances/in'
             'stance" '
             '--source-database-name "aaa" '
             '--source-sql-connection authentication="WindowsAuthentication" data-source="aaa" encrypt-connection=true '
             'password="placeholder" trust-server-certificate=true user-name="bbb" '
             '--resource-group "{rg}" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_managed_instance_show(test, checks=None):
    """Example: /DatabaseMigrationsSqlMi/get/Get Sql MI database Migration with the expand parameter."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-managed-instance show '
             '--expand "MigrationStatusDetails" '
             '--managed-instance-name "managedInstance1" '
             '--resource-group "{rg}" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_managed_instance_show2(test, checks=None):
    """Example: /DatabaseMigrationsSqlMi/get/Get Sql MI database Migration without the expand parameter."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-managed-instance show '
             '--managed-instance-name "managedInstance1" '
             '--resource-group "{rg}" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_managed_instance_cutover(test, checks=None):
    """Example: /DatabaseMigrationsSqlMi/post/Cutover online migration operation for the database."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-managed-instance cutover '
             '--managed-instance-name "managedInstance1" '
             '--migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" '
             '--resource-group "{rg}" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_managed_instance_cancel(test, checks=None):
    """Example: /DatabaseMigrationsSqlMi/post/Stop ongoing migration for the database."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-managed-instance cancel '
             '--managed-instance-name "managedInstance1" '
             '--migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" '
             '--resource-group "{rg}" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_vm_create(test, checks=None):
    """Example: /DatabaseMigrationsSqlVm/put/Create or Update Database Migration resource with Maximum parameters."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-vm create '
             '--source-location "{{\\"fileShare\\":{{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\bbb\\\\\\\\ccc\\",\\"password\\'
             '":\\"placeholder\\",\\"username\\":\\"name\\"}}}}" '
             '--target-location account-key="abcd" storage-account-resource-id="account.database.windows.net" '
             '--migration-service "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.DataMigrati'
             'on/sqlMigrationServices/{mySqlMigrationService}" '
             '--offline-configuration last-backup-name="last_backup_file_name" offline=true '
             '--scope "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.SqlVirtualMachine/sqlVi'
             'rtualMachines/testvm" '
             '--source-database-name "aaa" '
             '--source-sql-connection authentication="WindowsAuthentication" data-source="aaa" encrypt-connection=true '
             'password="placeholder" trust-server-certificate=true user-name="bbb" '
             '--resource-group "{rg}" '
             '--sql-vm-name "testvm" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_vm_create2(test, checks=None):
    """Example: /DatabaseMigrationsSqlVm/put/Create or Update Database Migration resource with Minimum parameters."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-vm create '
             '--source-location "{{\\"fileShare\\":{{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\bbb\\\\\\\\ccc\\",\\"password\\'
             '":\\"placeholder\\",\\"username\\":\\"name\\"}}}}" '
             '--target-location account-key="abcd" storage-account-resource-id="account.database.windows.net" '
             '--migration-service "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.DataMigrati'
             'on/sqlMigrationServices/{mySqlMigrationService}" '
             '--scope "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.SqlVirtualMachine/sqlVi'
             'rtualMachines/testvm" '
             '--source-database-name "aaa" '
             '--source-sql-connection authentication="WindowsAuthentication" data-source="aaa" encrypt-connection=true '
             'password="placeholder" trust-server-certificate=true user-name="bbb" '
             '--resource-group "{rg}" '
             '--sql-vm-name "testvm" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_vm_show(test, checks=None):
    """Example: /DatabaseMigrationsSqlVm/get/Get Sql VM database Migration with the expand parameter."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-vm show '
             '--expand "MigrationStatusDetails" '
             '--resource-group "{rg}" '
             '--sql-vm-name "testvm" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_vm_show2(test, checks=None):
    """Example: /DatabaseMigrationsSqlVm/get/Get Sql VM database Migration without the expand parameter."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-vm show '
             '--resource-group "{rg}" '
             '--sql-vm-name "testvm" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_vm_cutover(test, checks=None):
    """Example: /DatabaseMigrationsSqlVm/post/Cutover online migration operation for the database."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-vm cutover '
             '--migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" '
             '--resource-group "{rg}" '
             '--sql-vm-name "testvm" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_vm_cancel(test, checks=None):
    """Example: /DatabaseMigrationsSqlVm/post/Stop ongoing migration for the database."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-vm cancel '
             '--migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" '
             '--resource-group "{rg}" '
             '--sql-vm-name "testvm" '
             '--target-db-name "db1"',
             checks=checks)
@try_manual
def step_sql_service_delete(test, checks=None):
    """Example: /SqlMigrationServices/delete/Delete SQL Migration Service."""
    checks = [] if checks is None else checks
    test.cmd('az datamigration sql-service delete -y '
             '--resource-group "{rg}" '
             '--name "{mySqlMigrationService2}"',
             checks=checks)
| 42.321101
| 120
| 0.627303
| 1,893
| 18,452
| 6.031696
| 0.101426
| 0.024435
| 0.035295
| 0.040725
| 0.928972
| 0.921265
| 0.898318
| 0.878262
| 0.86206
| 0.824137
| 0
| 0.010445
| 0.226913
| 18,452
| 435
| 121
| 42.418391
| 0.789975
| 0.167245
| 0
| 0.829341
| 0
| 0.035928
| 0.537387
| 0.299687
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08982
| false
| 0.035928
| 0.002994
| 0
| 0.095808
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7a91a5fefb97cc99c15ff27ab6b5bfa9d1526b53
| 13,347
|
py
|
Python
|
elm/nn/gated.py
|
jinxu06/gsubsampling
|
2e0cace553cf43835709a34a11f9c15b08c15004
|
[
"Apache-2.0"
] | 12
|
2021-06-11T12:17:58.000Z
|
2021-12-16T07:36:47.000Z
|
elm/nn/gated.py
|
jinxu06/gsubsampling
|
2e0cace553cf43835709a34a11f9c15b08c15004
|
[
"Apache-2.0"
] | null | null | null |
elm/nn/gated.py
|
jinxu06/gsubsampling
|
2e0cace553cf43835709a34a11f9c15b08c15004
|
[
"Apache-2.0"
] | 1
|
2022-01-31T19:39:06.000Z
|
2022-01-31T19:39:06.000Z
|
import torch
import torch.nn.functional as F
from .conv_nn import BaseConvNN
from sylvester.layers import GatedConv2d, GatedConvTranspose2d
class GatedConvNN(BaseConvNN):
    """Stack of gated convolutions.

    Each layer produces 2*C channels which are split into hiddens h and gates g;
    the layer output is activation(h) * sigmoid(g). Optional instance/batch norm
    can be applied to hiddens ('h_norm') and gates ('g_norm') of all but the
    last layer.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding_mode='constant',
                 activation=F.relu,
                 out_activation=None,
                 use_bias=False,
                 h_norm=None,
                 g_norm=None):
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding_mode=padding_mode,
                         activation=activation,
                         out_activation=out_activation,
                         use_bias=use_bias)
        device = torch.cuda.current_device()
        self.layers = []
        for i in range(self.num_layers):
            layer = {}
            in_channels = self.in_channels if i == 0 else self.out_channels[i-1]
            # Double the output channels: one half for hiddens, one half for gates.
            layer['conv'] = torch.nn.Conv2d(in_channels,
                                            self.out_channels[i]*2,
                                            kernel_size=self.kernel_size[i],
                                            stride=self.stride[i],
                                            bias=self.use_bias)
            layer['activation'] = self.activation if i < self.num_layers - 1 else self.out_activation
            #xavier_uniform_init(layer['conv'].weight, layer['activation'])
            # Bias is None when use_bias=False; guard to avoid crashing on init.
            if layer['conv'].bias is not None:
                torch.nn.init.constant_(layer['conv'].bias, 0.)
            self.add_module("conv_{}".format(i+1), layer['conv'])
            if i < self.num_layers - 1:
                # - Hiddens
                if h_norm == 'in':
                    layer['h_norm'] = torch.nn.InstanceNorm2d(self.out_channels[i], affine=True).to(device)
                elif h_norm == 'bn':
                    layer['h_norm'] = torch.nn.BatchNorm2d(self.out_channels[i]).to(device)
                if 'h_norm' in layer:
                    self.add_module("h_norm_{}".format(i+1), layer['h_norm'])
                # - Gates
                if g_norm == 'in':
                    layer['g_norm'] = torch.nn.InstanceNorm2d(self.out_channels[i], affine=True).to(device)
                elif g_norm == 'bn':
                    layer['g_norm'] = torch.nn.BatchNorm2d(self.out_channels[i]).to(device)
                if 'g_norm' in layer:
                    self.add_module("g_norm_{}".format(i+1), layer['g_norm'])
            self.layers.append(layer)

    def forward(self, x, ps=None):
        """Apply all gated conv layers to x; ps is accepted for API symmetry and unused."""
        y = x
        for i, layer in enumerate(self.layers):
            _, _, h, w = y.size()
            if i < self.num_layers - 1:
                # NOTE(review): get_same_pad is resolved elsewhere in the package — confirm.
                pad = get_same_pad(size=[h, w], kernel_size=self.kernel_size[i], stride=self.stride[i], compressed=True)
                y = F.pad(y, pad, mode=self.padding_mode)
            y = layer['conv'](y)
            h, g = torch.chunk(y, chunks=2, dim=1)
            if 'h_norm' in layer:
                h = layer['h_norm'](h)
            if layer['activation'] is not None:
                h = layer['activation'](h)
            if 'g_norm' in layer:
                g = layer['g_norm'](g)
            # torch.sigmoid replaces the deprecated F.sigmoid (identical numerics).
            g = torch.sigmoid(g)
            y = h * g
        return y  # , None
class GatedConvTransposeNN(BaseConvNN):
    """Stack of gated transposed convolutions (decoder counterpart of GatedConvNN).

    Each layer outputs 2*C channels split into hiddens h and gates g, combined
    as activation(h) * sigmoid(g). Norms ('h_norm'/'g_norm') apply to all but
    the first layer.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding_mode='constant',
                 activation=F.relu,
                 out_activation=None,
                 use_bias=False,
                 h_norm=None,
                 g_norm=None):
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding_mode=padding_mode,
                         activation=activation,
                         out_activation=out_activation,
                         use_bias=use_bias)
        device = torch.cuda.current_device()
        self.layers = []
        for i in range(self.num_layers):
            layer = {}
            in_channels = self.in_channels if i == 0 else self.out_channels[i-1]
            if i == 0:
                # First layer: no input padding applied.
                layer['conv_transpose'] = torch.nn.ConvTranspose2d(in_channels,
                                                                   self.out_channels[i]*2,
                                                                   kernel_size=self.kernel_size[i],
                                                                   stride=self.stride[i],
                                                                   output_padding=self.stride[i]-1,
                                                                   bias=self.use_bias)
            else:
                layer['conv_transpose'] = torch.nn.ConvTranspose2d(in_channels,
                                                                   self.out_channels[i]*2,
                                                                   kernel_size=self.kernel_size[i],
                                                                   stride=self.stride[i],
                                                                   padding=(self.kernel_size[i]-1)//2,
                                                                   output_padding=self.stride[i]-1,
                                                                   bias=self.use_bias)
            # assert self.kernel_size[i] % 2 == 1, "Currently, paddings are properly handled only for odd number kernel size."
            layer['activation'] = self.activation if i < self.num_layers - 1 else self.out_activation
            #xavier_uniform_init(layer['conv_transpose'].weight, layer['activation'])
            # Bias is None when use_bias=False; guard to avoid crashing on init.
            if layer['conv_transpose'].bias is not None:
                torch.nn.init.constant_(layer['conv_transpose'].bias, 0.)
            self.add_module("conv_transpose_{}".format(i+1), layer['conv_transpose'])
            if i > 0:
                # - Hiddens
                if h_norm == 'in':
                    layer['h_norm'] = torch.nn.InstanceNorm2d(self.out_channels[i], affine=True).to(device)
                elif h_norm == 'bn':
                    layer['h_norm'] = torch.nn.BatchNorm2d(self.out_channels[i]).to(device)
                if 'h_norm' in layer:
                    self.add_module("h_norm_{}".format(i+1), layer['h_norm'])
                # - Gates
                if g_norm == 'in':
                    layer['g_norm'] = torch.nn.InstanceNorm2d(self.out_channels[i], affine=True).to(device)
                elif g_norm == 'bn':
                    layer['g_norm'] = torch.nn.BatchNorm2d(self.out_channels[i]).to(device)
                if 'g_norm' in layer:
                    self.add_module("g_norm_{}".format(i+1), layer['g_norm'])
            self.layers.append(layer)

    def forward(self, x, ps=None):
        """Apply all gated transposed-conv layers to x; ps is accepted for API symmetry and unused."""
        y = x
        for _, layer in enumerate(self.layers):
            y = layer['conv_transpose'](y)
            h, g = torch.chunk(y, chunks=2, dim=1)
            if 'h_norm' in layer:
                h = layer['h_norm'](h)
            if layer['activation'] is not None:
                h = layer['activation'](h)
            if 'g_norm' in layer:
                g = layer['g_norm'](g)
            # torch.sigmoid replaces the deprecated F.sigmoid (identical numerics).
            g = torch.sigmoid(g)
            y = h * g
        return y
class GatedEquivariantConvNN(BaseConvNN):
    """Gated conv stack with equivariant subsampling.

    Convolutions are always stride 1 (with circular "same" padding by default);
    downsampling is delegated to EquivariantSubSampling, which also returns the
    chosen subsampling phases p. forward returns (y, stacked_ps).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding_mode='circular',
                 activation=F.relu,
                 out_activation=None,
                 use_bias=False,
                 h_norm=None,
                 g_norm=None):
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding_mode=padding_mode,
                         activation=activation,
                         out_activation=out_activation,
                         use_bias=use_bias)
        device = torch.cuda.current_device()
        self.layers = []
        for i in range(self.num_layers):
            layer = {}
            in_channels = self.in_channels if i == 0 else self.out_channels[i-1]
            # Double the output channels: one half for hiddens, one half for gates.
            layer['conv'] = torch.nn.Conv2d(in_channels,
                                            self.out_channels[i]*2,
                                            kernel_size=self.kernel_size[i],
                                            stride=1,
                                            bias=self.use_bias)
            if self.stride[i] > 1:
                # NOTE(review): uses the raw `stride` argument, not self.stride —
                # confirm this is intended when stride is passed as a scalar.
                subsampling = EquivariantSubSampling(scale_ratio=stride[i])
                layer['subsampling'] = subsampling
            layer['activation'] = self.activation if i < self.num_layers - 1 else self.out_activation
            # NOTE(review): xavier_uniform_init is resolved elsewhere in the package — confirm.
            xavier_uniform_init(layer['conv'].weight, layer['activation'])
            # Bias is None when use_bias=False; guard to avoid crashing on init.
            if layer['conv'].bias is not None:
                torch.nn.init.constant_(layer['conv'].bias, 0.)
            self.add_module("conv_{}".format(i+1), layer['conv'])
            if i < self.num_layers - 1:
                # - Hiddens
                if h_norm == 'in':
                    layer['h_norm'] = torch.nn.InstanceNorm2d(self.out_channels[i], affine=True).to(device)
                elif h_norm == 'bn':
                    layer['h_norm'] = torch.nn.BatchNorm2d(self.out_channels[i]).to(device)
                if 'h_norm' in layer:
                    self.add_module("h_norm_{}".format(i+1), layer['h_norm'])
                # - Gates
                if g_norm == 'in':
                    layer['g_norm'] = torch.nn.InstanceNorm2d(self.out_channels[i], affine=True).to(device)
                elif g_norm == 'bn':
                    layer['g_norm'] = torch.nn.BatchNorm2d(self.out_channels[i]).to(device)
                if 'g_norm' in layer:
                    self.add_module("g_norm_{}".format(i+1), layer['g_norm'])
            self.layers.append(layer)

    def forward(self, x, ps=None):
        """Apply the stack to x. Optional ps supplies subsampling phases;
        returns (output, stacked phases actually used)."""
        y = x
        output_ps = []
        ps_idx = 0
        for i, layer in enumerate(self.layers):
            _, _, h, w = y.size()
            # NOTE(review): get_same_pad is resolved elsewhere in the package — confirm.
            pad = get_same_pad(size=[h, w], kernel_size=self.kernel_size[i], stride=1, compressed=True)
            y = F.pad(y, pad, mode=self.padding_mode)
            y = layer['conv'](y)
            if self.stride[i] > 1:
                if ps is not None:
                    y, p = layer['subsampling'](y, ps[:, ps_idx])
                else:
                    y, p = layer['subsampling'](y)
                output_ps.append(p)
                ps_idx += 1
            h, g = torch.chunk(y, chunks=2, dim=1)
            if 'h_norm' in layer:
                h = layer['h_norm'](h)
            if layer['activation'] is not None:
                h = layer['activation'](h)
            if 'g_norm' in layer:
                g = layer['g_norm'](g)
            # torch.sigmoid replaces the deprecated F.sigmoid (identical numerics).
            g = torch.sigmoid(g)
            y = h * g
        return y, torch.stack(output_ps, 1)
class GatedEquivariantConvTransposeNN(BaseConvNN):
    """Gated transposed-conv stack with equivariant upsampling (decoder of
    GatedEquivariantConvNN).

    Upsampling is delegated to EquivariantUpSampling, consuming the phases ps
    recorded by the encoder (last recorded phase first). Transposed convs run
    at stride 1; outputs are rescaled by stride**2 to preserve magnitude.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding_mode='circular',
                 activation=F.relu,
                 out_activation=None,
                 use_bias=False,
                 h_norm=None,
                 g_norm=None):
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding_mode=padding_mode,
                         activation=activation,
                         out_activation=out_activation,
                         use_bias=use_bias)
        device = torch.cuda.current_device()
        self.layers = []
        for i in range(self.num_layers):
            layer = {}
            in_channels = self.in_channels if i == 0 else self.out_channels[i-1]
            if self.stride[i] > 1:
                upsampling = EquivariantUpSampling(scale_ratio=self.stride[i])
                layer['upsampling'] = upsampling
            layer['conv_transpose'] = torch.nn.ConvTranspose2d(in_channels,
                                                               self.out_channels[i]*2,
                                                               kernel_size=self.kernel_size[i],
                                                               stride=1,
                                                               padding=self.kernel_size[i]-1,
                                                               bias=self.use_bias)
            assert self.kernel_size[i] % 2 == 1, "Currently, paddings are properly handled only for odd number kernel size."
            layer['activation'] = self.activation if i < self.num_layers - 1 else self.out_activation
            # NOTE(review): xavier_uniform_init is resolved elsewhere in the package — confirm.
            xavier_uniform_init(layer['conv_transpose'].weight, layer['activation'])
            # Bias is None when use_bias=False; guard to avoid crashing on init.
            if layer['conv_transpose'].bias is not None:
                torch.nn.init.constant_(layer['conv_transpose'].bias, 0.)
            self.add_module("conv_transpose_{}".format(i+1), layer['conv_transpose'])
            if i > 0:
                # - Hiddens
                if h_norm == 'in':
                    layer['h_norm'] = torch.nn.InstanceNorm2d(self.out_channels[i], affine=True).to(device)
                elif h_norm == 'bn':
                    layer['h_norm'] = torch.nn.BatchNorm2d(self.out_channels[i]).to(device)
                if 'h_norm' in layer:
                    self.add_module("h_norm_{}".format(i+1), layer['h_norm'])
                # - Gates
                if g_norm == 'in':
                    layer['g_norm'] = torch.nn.InstanceNorm2d(self.out_channels[i], affine=True).to(device)
                elif g_norm == 'bn':
                    layer['g_norm'] = torch.nn.BatchNorm2d(self.out_channels[i]).to(device)
                if 'g_norm' in layer:
                    self.add_module("g_norm_{}".format(i+1), layer['g_norm'])
            self.layers.append(layer)

    def forward(self, x, ps=None):
        """Apply the stack to x, consuming encoder phases ps (most recent first).

        NOTE(review): when any stride > 1 and ps is None, ps.pop() would fail —
        callers appear to always supply ps in that case; confirm.
        """
        y = x
        if ps is not None:
            # NOTE(review): deque is resolved elsewhere in the package (collections.deque) — confirm import.
            ps = deque(torch.unbind(ps, dim=1))
        for i, layer in enumerate(self.layers):
            if self.stride[i] > 1:
                y = layer['upsampling'](y, ps.pop())
                #y = layer['upsampling'](y)
            y = F.pad(y, pad=[(self.kernel_size[i]-1)//2 for _ in range(4)], mode=self.padding_mode)
            y = layer['conv_transpose'](y) * self.stride[i]**2  # dim_group, rescale
            h, g = torch.chunk(y, chunks=2, dim=1)
            if 'h_norm' in layer:
                h = layer['h_norm'](h)
            if layer['activation'] is not None:
                h = layer['activation'](h)
            if 'g_norm' in layer:
                g = layer['g_norm'](g)
            # torch.sigmoid replaces the deprecated F.sigmoid (identical numerics).
            g = torch.sigmoid(g)
            y = h * g
        return y
| 40.081081
| 120
| 0.531505
| 1,644
| 13,347
| 4.108881
| 0.069951
| 0.029608
| 0.055514
| 0.059215
| 0.915914
| 0.886751
| 0.875056
| 0.866321
| 0.866173
| 0.866173
| 0
| 0.011176
| 0.342998
| 13,347
| 333
| 121
| 40.081081
| 0.759152
| 0.027871
| 0
| 0.885813
| 0
| 0
| 0.070514
| 0
| 0
| 0
| 0
| 0
| 0.00346
| 1
| 0.027682
| false
| 0
| 0.013841
| 0
| 0.069204
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8fa43ed94d9bfd4892f53f24ad267f6df106aeb1
| 8,202
|
py
|
Python
|
aws_marketplace/using_model_packages/improving_industrial_workplace_safety/src/model_package_arns.py
|
jerrypeng7773/amazon-sagemaker-examples
|
c5ddecce1f739a345465b9a38b064983a129141d
|
[
"Apache-2.0"
] | 2,610
|
2020-10-01T14:14:53.000Z
|
2022-03-31T18:02:31.000Z
|
aws_marketplace/using_model_packages/improving_industrial_workplace_safety/src/model_package_arns.py
|
jerrypeng7773/amazon-sagemaker-examples
|
c5ddecce1f739a345465b9a38b064983a129141d
|
[
"Apache-2.0"
] | 1,959
|
2020-09-30T20:22:42.000Z
|
2022-03-31T23:58:37.000Z
|
aws_marketplace/using_model_packages/improving_industrial_workplace_safety/src/model_package_arns.py
|
jerrypeng7773/amazon-sagemaker-examples
|
c5ddecce1f739a345465b9a38b064983a129141d
|
[
"Apache-2.0"
] | 2,052
|
2020-09-30T22:11:46.000Z
|
2022-03-31T23:02:51.000Z
|
class ModelPackageArnProvider:
    """Resolves region-specific SageMaker Marketplace model-package ARNs.

    All four model packages are published from the same AWS account per
    region, so the region→account table is shared and each getter only
    supplies its package name.  Lookups raise ``KeyError`` for unsupported
    regions, matching the original per-method dict behavior.
    """

    # Marketplace publisher account id for each supported region.
    _REGION_ACCOUNTS = {
        "ap-south-1": "077584701553",
        "ap-northeast-1": "977537786026",
        "ap-northeast-2": "745090734665",
        "ap-southeast-1": "192199979996",
        "ap-southeast-2": "666831318237",
        "ca-central-1": "470592106596",
        "eu-central-1": "446921602837",
        "eu-west-1": "985815980388",
        "eu-west-2": "856760150666",
        "us-east-1": "865070037744",
        "us-east-2": "057799348421",
        "us-west-1": "382657785993",
        "us-west-2": "594846645681",
    }

    @classmethod
    def _get_model_package_arn(cls, region, package_name):
        # Build arn:aws:sagemaker:<region>:<account>:model-package/<name>.
        account = cls._REGION_ACCOUNTS[region]
        return "arn:aws:sagemaker:{}:{}:model-package/{}".format(region, account, package_name)

    @staticmethod
    def get_construction_worker_model_package_arn(current_region):
        """ARN of the construction-worker detection model in *current_region*."""
        return ModelPackageArnProvider._get_model_package_arn(
            current_region, "construction-worker-v1-copy-06-3f94f03fae021ca61cb609d42d0118c2")

    @staticmethod
    def get_machine_detection_model_package_arn(current_region):
        """ARN of the construction-machine detection model in *current_region*."""
        return ModelPackageArnProvider._get_model_package_arn(
            current_region, "indus-construction-machines-eed6b262d4df3c8f46341abe757c5b63")

    @staticmethod
    def get_ppe_detection_model_package_arn(current_region):
        """ARN of the PPE detection model in *current_region*."""
        return ModelPackageArnProvider._get_model_package_arn(
            current_region, "ppe-v1-copy-06-25-32446c1aac94cdb4e4d0e131f2efe62f")

    @staticmethod
    def get_hard_hat_detection_model_package_arn(current_region):
        """ARN of the hard-hat detection model in *current_region*."""
        return ModelPackageArnProvider._get_model_package_arn(
            current_region, "hardhat-detection-gpu-2-e3449f86581997ece577e718d771238d")
| 106.519481
| 156
| 0.751402
| 960
| 8,202
| 6.389583
| 0.060417
| 0.109553
| 0.12716
| 0.083469
| 0.982556
| 0.982556
| 0.963971
| 0.89762
| 0.774046
| 0.672481
| 0
| 0.256263
| 0.109364
| 8,202
| 76
| 157
| 107.921053
| 0.583436
| 0
| 0
| 0.164384
| 0
| 0.712329
| 0.79505
| 0.724823
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054795
| false
| 0
| 0
| 0
| 0.123288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
8fbc1e7ef842f3d39b06ffaf70e663ce1bce757f
| 235
|
py
|
Python
|
nmigen_boards/nexys4ddr.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 11
|
2021-12-10T12:23:29.000Z
|
2022-03-13T08:40:20.000Z
|
nmigen_boards/nexys4ddr.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 12
|
2021-12-11T18:51:29.000Z
|
2022-03-12T05:08:52.000Z
|
nmigen_boards/nexys4ddr.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 7
|
2021-12-12T07:20:21.000Z
|
2022-03-06T06:20:55.000Z
|
# Backwards-compatibility shim: the nmigen project was renamed to Amaranth.
# Re-export everything from the new module location, including __all__
# (star-import does not copy dunder names, so it is imported explicitly).
from amaranth_boards.nexys4ddr import *
from amaranth_boards.nexys4ddr import __all__
import warnings
# stacklevel=2 points the warning at the importer of this module.
warnings.warn("instead of nmigen_boards.nexys4ddr, use amaranth_boards.nexys4ddr",
              DeprecationWarning, stacklevel=2)
| 29.375
| 82
| 0.8
| 27
| 235
| 6.666667
| 0.555556
| 0.333333
| 0.383333
| 0.3
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024752
| 0.140426
| 235
| 7
| 83
| 33.571429
| 0.866337
| 0
| 0
| 0
| 0
| 0
| 0.276596
| 0.208511
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
8f0f4916fc904d6938b34299f9220d0b0382effb
| 90
|
py
|
Python
|
03Aula09-09/ex02.py
|
danicon/Curso-IPE
|
3b9e2a9d187492d6561a512363bd06156286df6a
|
[
"MIT"
] | 2
|
2020-09-09T12:50:57.000Z
|
2020-09-09T12:56:02.000Z
|
03Aula09-09/ex02.py
|
danicon/Curso-IPE
|
3b9e2a9d187492d6561a512363bd06156286df6a
|
[
"MIT"
] | null | null | null |
03Aula09-09/ex02.py
|
danicon/Curso-IPE
|
3b9e2a9d187492d6561a512363bd06156286df6a
|
[
"MIT"
] | null | null | null |
print('\n \n')
# Multiplication table of 7, from 0 through 10 inclusive.
for fator in range(11):
    print(f" 7 x {fator} = {7*fator}")
print('\n \n')
| 11.25
| 30
| 0.411111
| 20
| 90
| 1.85
| 0.45
| 0.324324
| 0.378378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092308
| 0.277778
| 90
| 8
| 31
| 11.25
| 0.476923
| 0
| 0
| 0.333333
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
56d601b57cbbc92a4b74b42441468b44378b9105
| 2,040
|
py
|
Python
|
tests/python/test_for_break.py
|
kxxt/taichi
|
15f39b79c258080f1e34fcbdc29646d9ced0a4fe
|
[
"MIT"
] | 15
|
2020-01-29T19:07:19.000Z
|
2021-05-12T02:53:22.000Z
|
tests/python/test_for_break.py
|
kxxt/taichi
|
15f39b79c258080f1e34fcbdc29646d9ced0a4fe
|
[
"MIT"
] | 1
|
2020-02-08T02:11:58.000Z
|
2020-02-08T02:11:58.000Z
|
tests/python/test_for_break.py
|
kxxt/taichi
|
15f39b79c258080f1e34fcbdc29646d9ced0a4fe
|
[
"MIT"
] | 2
|
2020-01-31T20:10:35.000Z
|
2021-03-16T07:51:59.000Z
|
import taichi as ti
@ti.test()
def test_for_break():
    # `break` in the inner serial loop of a Taichi kernel: row i stops being
    # written as soon as j exceeds i, so cells with j > i keep their
    # zero-initialized value.
    x = ti.field(ti.i32)
    N, M = 4, 4
    ti.root.dense(ti.ij, (N, M)).place(x)
    @ti.kernel
    def func():
        for i in range(N):
            for j in range(M):
                if j > i:
                    break  # break BEFORE the write: x[i, j] stays 0 for j > i
                x[i, j] = 100 * i + j
    func()
    # Verify on the host: written cells hold 100*i + j, the rest stay 0.
    for i in range(N):
        for j in range(M):
            if j > i:
                assert x[i, j] == 0
            else:
                assert x[i, j] == 100 * i + j
@ti.test()
def test_for_break2():
    # Same as test_for_break but the write happens BEFORE the break, so the
    # first out-of-range cell (j == i + 1) is also written.
    x = ti.field(ti.i32)
    N, M = 8, 8
    ti.root.dense(ti.ij, (N, M)).place(x)
    @ti.kernel
    def func():
        for i in range(N):
            for j in range(M):
                x[i, j] = 100 * i + j  # write first...
                if j > i:
                    break  # ...then break, so j == i + 1 is still written
    func()
    for i in range(N):
        for j in range(M):
            if j > i + 1:
                assert x[i, j] == 0  # never reached
            else:
                assert x[i, j] == 100 * i + j
@ti.archs_excluding(ti.vulkan)
def test_for_break3():
    # `break` with a loop-variant inner range(i, M - i); row 0 breaks on its
    # first iteration so it is never written at all.
    x = ti.field(ti.i32)
    N, M = 8, 8
    ti.root.dense(ti.ij, (N, M)).place(x)
    @ti.kernel
    def func():
        for i in range(N):
            for j in range(i, M - i):
                if i == 0:
                    break  # row 0: break immediately, nothing written
                x[i, j] = 100 * i + j
    func()
    for i in range(N):
        for j in range(M):
            # Zero wherever j is outside [i, M - i) or the row-0 break fired.
            if j < i or j >= M - i or i == 0:
                assert x[i, j] == 0
            else:
                assert x[i, j] == 100 * i + j
@ti.test()
def test_for_break_complex():
    # `break` combined with non-zero range starts: outer loop starts at 1,
    # inner at 3, and the inner loop breaks (before writing) once j > i.
    x = ti.field(ti.i32)
    N, M = 16, 32
    ti.root.dense(ti.ij, (N, M)).place(x)
    @ti.kernel
    def func():
        for i in range(1, N):
            for j in range(3, M):
                if j > i:
                    break  # break before the write
                x[i, j] = 100 * i + j
    func()
    for i in range(N):
        for j in range(M):
            # Zero for rows/columns outside the kernel ranges or past the break.
            if i < 1 or j < 3 or j > i:
                assert x[i, j] == 0
            else:
                assert x[i, j] == 100 * i + j
| 21.702128
| 45
| 0.384804
| 326
| 2,040
| 2.377301
| 0.125767
| 0.051613
| 0.046452
| 0.103226
| 0.859355
| 0.843871
| 0.8
| 0.76129
| 0.76129
| 0.76129
| 0
| 0.051163
| 0.473039
| 2,040
| 93
| 46
| 21.935484
| 0.669767
| 0
| 0
| 0.805195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 1
| 0.103896
| false
| 0
| 0.012987
| 0
| 0.116883
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
857f1a2bee4f85d70364e1221ab6c1cfaacc450a
| 8,942
|
py
|
Python
|
core/youtube.py
|
Gumbraise/HXYA
|
96c4af9a7da53fcd633b6103eb423b80bfd0cee3
|
[
"MIT"
] | 2
|
2020-03-24T15:40:04.000Z
|
2020-06-02T00:38:27.000Z
|
core/youtube.py
|
Gumbraise/HXYA
|
96c4af9a7da53fcd633b6103eb423b80bfd0cee3
|
[
"MIT"
] | null | null | null |
core/youtube.py
|
Gumbraise/HXYA
|
96c4af9a7da53fcd633b6103eb423b80bfd0cee3
|
[
"MIT"
] | null | null | null |
from lib.menu import clear, menu
import os, requests, json, time, sys, datetime
import lib.print as printMenu
# Load the YouTube Data API key from the bundled JSON configuration file.
js = "file/youtube.json"
try:
    with open(js) as jsonFile:
        keys = json.load(jsonFile)
except FileNotFoundError:
    sys.exit(' youtube.json is missing. Reinstall HXYA here : https://github.com/gumbraise/HXYA')
# Import-time timestamp; the colon-free variant is safe for filenames.
s = str(datetime.datetime.now())
stwo = s.replace(":", "-")
API_KEY = keys["API_KEY"]
def _watch_stat_and_kill_obs(url, metric_name, stat_key, count_label, disabled_msg):
    """Poll a YouTube Data API statistic and taskkill OBS at a threshold.

    Shared engine for likClo/disClo/vieClo/comClo/subClo, which previously
    duplicated this ~40-line loop five times.

    Args:
        url: fully built API request URL (video or channel endpoint).
        metric_name: word used in the threshold prompt (e.g. 'likes').
        stat_key: key inside the response's "statistics" object.
        count_label: suffix printed after the current count; kept
            byte-identical to the originals (including singular forms
            like " dislike" and " view").
        disabled_msg: message shown when the owner disabled the statistic.
    """
    # Ask for the threshold until a valid integer is typed.
    while True:
        try:
            number_limit = int(input(' How many ' + metric_name + ' do you want to close OBS ?: '))
            break
        except:
            print(" Please type a real number")
    # Poll every 60 seconds until the threshold is reached or an error occurs.
    while True:
        response = requests.get(url)
        respJSON = response.json()
        try:
            number = int(respJSON['items'][0].get("statistics").get(stat_key))
            print(' ' + str(datetime.datetime.now()) + " >>> " + str(number) + count_label)
            if (number >= number_limit):
                # Threshold reached: kill OBS and return to the menu.
                os.system("taskkill /im obs64.exe")
                print(" OBS is taskkilled")
                input(' Please type ENTER')
                clear()
                menu()
                print(printMenu.youtubeMenu)
                break
        except:
            # Distinguish an API error payload from a disabled statistic.
            try:
                number = str(respJSON['error'].get("error").get("code"))
                clear()
                menu()
                print(" An error as occured. Verify your API_KEY and update it")
                input(' Please type ENTER')
                clear()
                menu()
                print(printMenu.youtubeMenu)
                break
            except:
                clear()
                menu()
                print(disabled_msg)
                input(' Please type ENTER')
                clear()
                menu()
                print(printMenu.youtubeMenu)
                break
        time.sleep(60)

def likClo():
    """Close OBS when a video reaches the requested like count."""
    VideoId = str(input(" Paste the VideoId here: "))
    url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails%2Cstatistics&id='+VideoId+'&key='+API_KEY
    _watch_stat_and_kill_obs(url, 'likes', "likeCount", " likes",
                             " An error as occured. The owner of this video desactivated likes")

def disClo():
    """Close OBS when a video reaches the requested dislike count."""
    VideoId = str(input(" Paste the VideoId here: "))
    url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails%2Cstatistics&id='+VideoId+'&key='+API_KEY
    _watch_stat_and_kill_obs(url, 'dislikes', "dislikeCount", " dislike",
                             " An error as occured. The owner of this video desactivated dislikes")

def vieClo():
    """Close OBS when a video reaches the requested view count."""
    VideoId = str(input(" Paste the VideoId here: "))
    url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails%2Cstatistics&id='+VideoId+'&key='+API_KEY
    _watch_stat_and_kill_obs(url, 'views', "viewCount", " view",
                             " An error as occured. The owner of this video desactivated views")

def comClo():
    """Close OBS when a video reaches the requested comment count."""
    VideoId = str(input(" Paste the VideoId here: "))
    url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails%2Cstatistics&id='+VideoId+'&key='+API_KEY
    _watch_stat_and_kill_obs(url, 'comments', "commentCount", " comments",
                             " An error as occured. The owner of this video desactivated comments")

def subClo():
    """Close OBS when a channel reaches the requested subscriber count."""
    ChannelId = str(input(' Paste the ChannelID here: '))
    url = 'https://www.googleapis.com/youtube/v3/channels?part=statistics&id='+ChannelId+'&key='+API_KEY
    _watch_stat_and_kill_obs(url, 'subscribers', "subscriberCount", " subscribers",
                             " An error as occured. The owner of this channel desactivated subscribers")
| 35.911647
| 127
| 0.491613
| 874
| 8,942
| 5.004577
| 0.143021
| 0.053498
| 0.080018
| 0.068587
| 0.866712
| 0.866712
| 0.866712
| 0.82556
| 0.817101
| 0.817101
| 0
| 0.007001
| 0.392977
| 8,942
| 249
| 128
| 35.911647
| 0.798821
| 0
| 0
| 0.832618
| 0
| 0.017167
| 0.26859
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021459
| false
| 0
| 0.012876
| 0
| 0.034335
| 0.175966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
85851e56a947f9c10284661e8e274b8b47b3b6bc
| 191
|
py
|
Python
|
opentimesheet/utils/uuid.py
|
valerymelou/opentimesheet-server
|
0da97ebb3c3e59962132d1bc5e83e1d727f7331b
|
[
"MIT"
] | null | null | null |
opentimesheet/utils/uuid.py
|
valerymelou/opentimesheet-server
|
0da97ebb3c3e59962132d1bc5e83e1d727f7331b
|
[
"MIT"
] | 95
|
2021-02-20T21:53:29.000Z
|
2022-01-14T17:24:50.000Z
|
opentimesheet/utils/uuid.py
|
valerymelou/opentimesheet-server
|
0da97ebb3c3e59962132d1bc5e83e1d727f7331b
|
[
"MIT"
] | null | null | null |
from base64 import urlsafe_b64encode
from uuid import uuid4
def uuid():
    """Generate a compact, URL-safe identifier.

    A random UUID4's 16 raw bytes are base64url-encoded and the trailing
    '=' padding is dropped, yielding a 22-character ASCII string.
    """
    raw = uuid4().bytes
    encoded = urlsafe_b64encode(raw).decode("ascii")
    return encoded.rstrip("=")
| 19.1
| 71
| 0.670157
| 24
| 191
| 5.25
| 0.708333
| 0.253968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051948
| 0.193717
| 191
| 9
| 72
| 21.222222
| 0.766234
| 0.125654
| 0
| 0
| 0
| 0
| 0.039735
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a44f64156cff94718462166e3428bee67197e447
| 1,138
|
py
|
Python
|
temporalio/bridge/proto/workflow_activation/__init__.py
|
cretz/temporal-sdk-python
|
431ca1967d365556a9cf5aa9aac00243b71059f8
|
[
"MIT"
] | 55
|
2022-01-31T22:02:22.000Z
|
2022-03-30T11:17:21.000Z
|
temporalio/bridge/proto/workflow_activation/__init__.py
|
cretz/temporal-sdk-python
|
431ca1967d365556a9cf5aa9aac00243b71059f8
|
[
"MIT"
] | 7
|
2022-02-04T14:08:46.000Z
|
2022-03-22T13:27:30.000Z
|
temporalio/bridge/proto/workflow_activation/__init__.py
|
cretz/temporal-sdk-python
|
431ca1967d365556a9cf5aa9aac00243b71059f8
|
[
"MIT"
] | 4
|
2022-01-31T17:31:49.000Z
|
2022-03-29T01:04:46.000Z
|
# Re-export the generated protobuf message types for workflow activations so
# callers can import them from this package instead of the *_pb2 module.
from .workflow_activation_pb2 import (
    CancelWorkflow,
    FireTimer,
    NotifyHasPatch,
    QueryWorkflow,
    RemoveFromCache,
    ResolveActivity,
    ResolveChildWorkflowExecution,
    ResolveChildWorkflowExecutionStart,
    ResolveChildWorkflowExecutionStartCancelled,
    ResolveChildWorkflowExecutionStartFailure,
    ResolveChildWorkflowExecutionStartSuccess,
    ResolveRequestCancelExternalWorkflow,
    ResolveSignalExternalWorkflow,
    SignalWorkflow,
    StartWorkflow,
    UpdateRandomSeed,
    WorkflowActivation,
    WorkflowActivationJob,
)
# Explicit public API: keeps `from ... import *` limited to the re-exports above.
__all__ = [
    "CancelWorkflow",
    "FireTimer",
    "NotifyHasPatch",
    "QueryWorkflow",
    "RemoveFromCache",
    "ResolveActivity",
    "ResolveChildWorkflowExecution",
    "ResolveChildWorkflowExecutionStart",
    "ResolveChildWorkflowExecutionStartCancelled",
    "ResolveChildWorkflowExecutionStartFailure",
    "ResolveChildWorkflowExecutionStartSuccess",
    "ResolveRequestCancelExternalWorkflow",
    "ResolveSignalExternalWorkflow",
    "SignalWorkflow",
    "StartWorkflow",
    "UpdateRandomSeed",
    "WorkflowActivation",
    "WorkflowActivationJob",
]
| 27.095238
| 50
| 0.764499
| 42
| 1,138
| 20.571429
| 0.571429
| 0.053241
| 0.085648
| 0.115741
| 0.960648
| 0.960648
| 0.960648
| 0.960648
| 0.960648
| 0.960648
| 0
| 0.001055
| 0.16696
| 1,138
| 41
| 51
| 27.756098
| 0.910338
| 0
| 0
| 0
| 0
| 0
| 0.364675
| 0.240773
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025
| 0
| 0.025
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a4a2391f98220ae0994b4b2770529c2877bb170f
| 241
|
py
|
Python
|
src/geomi/planar/mod2.py
|
IndianEngineeringDesignForum/geomi
|
dccff71c25a45c2c6e618a9c7af611762fe19176
|
[
"MIT"
] | null | null | null |
src/geomi/planar/mod2.py
|
IndianEngineeringDesignForum/geomi
|
dccff71c25a45c2c6e618a9c7af611762fe19176
|
[
"MIT"
] | null | null | null |
src/geomi/planar/mod2.py
|
IndianEngineeringDesignForum/geomi
|
dccff71c25a45c2c6e618a9c7af611762fe19176
|
[
"MIT"
] | null | null | null |
def func1():
    """Placeholder function used to exercise docstring rendering.

    Does nothing and returns ``None``.

    :External Ref:
        * `Wikipedia <https://en.wikipedia.org/wiki/Main_Page>`_
        * `Wikipedia, The Free Encyclopedia <https://en.wikipedia.org/wiki/Main_Page>`_
    """
    return None
| 26.777778
| 87
| 0.626556
| 29
| 241
| 5.068966
| 0.655172
| 0.095238
| 0.217687
| 0.258503
| 0.421769
| 0.421769
| 0.421769
| 0
| 0
| 0
| 0
| 0.010638
| 0.219917
| 241
| 8
| 88
| 30.125
| 0.771277
| 0.792531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
f158f9e979e15336333052f3229b32a018e6cf9b
| 162
|
py
|
Python
|
first-homework.py
|
cnojiri/astr-119
|
e4fae90e75276c1800fda5bb559f988767c0d870
|
[
"MIT"
] | 1
|
2021-12-07T07:41:33.000Z
|
2021-12-07T07:41:33.000Z
|
first-homework.py
|
cnojiri/astr-119
|
e4fae90e75276c1800fda5bb559f988767c0d870
|
[
"MIT"
] | 6
|
2021-09-29T22:13:31.000Z
|
2021-11-24T16:00:50.000Z
|
first-homework.py
|
cnojiri/astr-119
|
e4fae90e75276c1800fda5bb559f988767c0d870
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# This program prints the author's name and pronouns.
print("Caitlyn Nojiri, pronouns:she/her") # print out Caitlyn Nojiri, pronouns: she/her
| 23.142857
| 87
| 0.740741
| 25
| 162
| 4.8
| 0.72
| 0.216667
| 0.35
| 0.4
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007194
| 0.141975
| 162
| 6
| 88
| 27
| 0.856115
| 0.679012
| 0
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
f164eed2ccc8fb92c18d0ec0f9700c83aa8bd18a
| 17,404
|
py
|
Python
|
project_main/finance/finance_flask.py
|
fantastic4ever/project2
|
cedd5f008df4cbd121ad0e275dbcae161d821671
|
[
"Apache-2.0"
] | null | null | null |
project_main/finance/finance_flask.py
|
fantastic4ever/project2
|
cedd5f008df4cbd121ad0e275dbcae161d821671
|
[
"Apache-2.0"
] | null | null | null |
project_main/finance/finance_flask.py
|
fantastic4ever/project2
|
cedd5f008df4cbd121ad0e275dbcae161d821671
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, request, Response
from eve.io.mongo import Validator
from pymongo import MongoClient
import finance_flask as ff
import urllib2, json, requests, boto3
import sys, subprocess, signal, time
import os
import re
#############
# Variables #
#############
# NOTE(review): `global` at module scope is a no-op; these lines only document
# that other functions assign these names via `global` declarations.
global eve_process
global args
global sqs_client
app = Flask(__name__)
# MongoLab connection string.
# NOTE(review): credentials are hard-coded in source — should be moved to
# configuration / environment variables.
mongo_url = 'mongodb://admin:admin@ds033915.mongolab.com:33915/project2'
# Base URL of the eve REST service; empty here, presumably assigned during
# startup elsewhere in the file — confirm.
eve_base_url = ''
# Cerberus-style validation schema for one course entry (eve/Validator).
schema_course = {
    "course_id": {
        "type": "integer",
        'min': 10000,  # five-digit course ids only
        'max': 99999,
        "required": True
    },
    "credit": {
        "type": "integer",
        'min': 0,
        'max': 5,
        "required": True
    },
    "unit_price": {
        "type": "float",
        'min': 0,
        "required": True
    }
}
########################
# Custom Error Handler #
########################
class MongoDbUnavailable(Exception):
    # Raised when the MongoDB backend cannot be reached.
    pass
class StudentNotExists(Exception):
    # Raised when no finance record exists for the requested student uni.
    pass
class EveUnavailable(Exception):
    # Raised when the eve REST service cannot be reached.
    pass
@app.errorhandler(MongoDbUnavailable)
def mongodb_failed_to_connect(error):
    """Return a 500 JSON error when MongoDB cannot be reached.

    Fix: the previous body referenced an undefined name ``response``
    (NameError inside the error handler); return the canned error payload
    shown in the original commented-out line instead.
    """
    return Response('{"_status": "ERR", "_error": {"message": "Failed to connect to mongodb", "code": 500}}', mimetype='application/json', status=500)
@app.errorhandler(StudentNotExists)
def mongodb_configuration_unavailable(error):
    """Return a 404 JSON error when the student has no finance record.

    Fix: the previous body referenced an undefined name ``response``
    (NameError inside the error handler); return the canned error payload
    shown in the original commented-out line instead.
    """
    return Response('{"_status": "ERR", "_error": {"message": "Student with specified uni does not have record", "code": 404}}', mimetype='application/json', status=404)
@app.errorhandler(EveUnavailable)
def mongodb_failed_to_connect(error):
    # NOTE(review): this name shadows the MongoDbUnavailable handler above;
    # Flask registration still works (the decorator runs at definition time),
    # but the function should be renamed (e.g. eve_failed_to_connect).
    """Return a 500 JSON error when the eve service is unreachable.

    Fix: the previous body referenced an undefined name ``response``
    (NameError inside the error handler); return the canned error payload
    shown in the original commented-out line instead.
    """
    return Response('{"_status": "ERR", "_error": {"message": "Failed to connect to eve service", "code": 500}}', mimetype='application/json', status=500)
@app.errorhandler(Exception)
def unexpected_failure(error):
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(error).__name__, error.args)
print message
# return Response('{"_status": "ERR", "_error": {"message": "Unexpected failure", "code": 500}}', mimetype='application/json', status=500)
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), response.content)
###############
# API Mapping #
###############
@app.route("/private/finance", methods = ['GET'])
def finance_read_all():
    # Proxy a GET for all finance records to the eve REST layer and deliver
    # the result via the caller's response queue.
    print 'Recieved GET finance request'
    try:
        response = requests.get(eve_base_url)
        # return Response(response.content, mimetype='application/json', status=response.status_code)
        # NOTE(review): send_to_response_queue is defined elsewhere in this
        # file (not visible in this chunk); presumably posts to SQS — confirm.
        return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), response.content)
    except Exception as e:
        # Map connection failures to EveUnavailable; re-raise anything else
        # so the generic error handler responds.
        if type(e).__name__ == 'ConnectionError':
            print 'Error: Cannot connect to eve'
            raise EveUnavailable
        else:
            print 'Error: %s when read finance from eve' % (type(e).__name__)
            raise e
@app.route("/private/finance", methods = ['POST'])
def finance_create_student():
    # Create a student finance record: compute the total tuition from the
    # course list, then forward the document to the eve REST layer.
    print 'Recieved POST finance request'
    try:
        data = request.get_json(force = True)
        # Calculate and update tuition: start from any caller-supplied value
        # and add credit * unit_price for each course.
        tuition = 0
        if 'tuition' in data:
            tuition = data['tuition']
        if 'course_list' in data:
            for course in data['course_list']:
                tuition += course['credit'] * course['unit_price']
        data.update({'tuition': tuition})
        # Forward request to eve, preserving the caller's headers.
        headers = request.headers
        response = requests.post(eve_base_url, data = json.dumps(data), headers = headers)
        # return Response(response.content, mimetype='application/json', status=response.status_code)
        # Result is delivered asynchronously via the caller's response queue.
        return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), response.content)
    except Exception as e:
        # Map connection failures to EveUnavailable; re-raise anything else.
        if type(e).__name__ == 'ConnectionError':
            print 'Error: Cannot connect to eve'
            raise EveUnavailable
        else:
            print 'Error: %s when add student finance info to eve' % (type(e).__name__)
            raise e
@app.route("/private/finance", methods = ['DELETE'])
def finance_delete_all():
    # Delete ALL finance records via the eve REST layer; deliver the result
    # through the caller's response queue.
    print 'Recieved DELETE finance request'
    try:
        response = requests.delete(eve_base_url)
        # return Response(response.content, mimetype='application/json', status=response.status_code)
        return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), response.content)
    except Exception as e:
        # Map connection failures to EveUnavailable; re-raise anything else.
        if type(e).__name__ == 'ConnectionError':
            print 'Error: Cannot connect to eve'
            raise EveUnavailable
        else:
            print 'Error: %s when read finance from eve' % (type(e).__name__)
            raise e
@app.route("/private/finance/<student_id>", methods = ['GET'])
def finance_read_student(student_id):
print 'Recieved GET student finance request'
try:
response = requests.get(eve_base_url + '/' + student_id)
# return Response(response.content, mimetype='application/json', status=response.status_code)
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), response.content)
except Exception as e:
if type(e).__name__ == 'ConnectionError':
print 'Error: Cannot connect to eve'
raise EveUnavailable
else:
print 'Error: %s when read student finance from eve' % (type(e).__name__)
raise e
@app.route("/private/finance/<student_id>", methods = ['PUT'])
def finance_update_student(student_id):
print 'Recieved PUT student finance request'
try:
response = requests.get(eve_base_url + '/' + student_id)
response_content = json.loads(response.content)
oid = response_content['_id']
data = request.get_json(force = True)
# Calculate and update tuition
tuition = 0
if 'tuition' in data:
tuition = data['tuition']
if 'course_list' in data:
for course in data['course_list']:
tuition += course['credit'] * course['unit_price']
data.update({'tuition': tuition})
# Forward request to eve
headers = request.headers
response = requests.put(eve_base_url + '/' + oid, data = json.dumps(data), headers = headers)
# return Response(response.content, mimetype='application/json', status=response.status_code)
print "nani"
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), response.content)
except Exception as e:
if type(e).__name__ == 'ConnectionError':
print 'Error: Cannot connect to eve'
raise EveUnavailable
else:
print 'Error: %s when update student finance from eve' % (type(e).__name__)
raise e
@app.route('/private/finance/<student_id>', methods=['DELETE'])
def finance_delete_student(student_id):
print 'Recieved DELETE student finance request'
try:
response = requests.get(eve_base_url + '/' + student_id)
response_content = json.loads(response.content)
oid = response_content['_id']
headers = request.headers
response = requests.delete(eve_base_url + '/' + oid, headers = headers)
# return Response(response.content, mimetype='application/json', status=response.status_code)
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), response.content)
except Exception as e:
if type(e).__name__ == 'ConnectionError':
print 'Error: Cannot connect to eve'
raise EveUnavailable
else:
print 'Error: %s when delete student finance from eve' % (type(e).__name__)
raise e
@app.route("/private/finance/<student_id>/courses", methods = ['POST'])
def add_to_course_list(student_id):
print 'Recieved POST courses request'
try:
courses_to_update = request.get_json(force = True)
print 'courses_to_update(type=%s) = %s' % (type(courses_to_update).__name__, courses_to_update)
# If data is not array, do not process
if type(courses_to_update).__name__ != 'list':
# return Response('{"_status": "ERR", "_error": {"message": "Data must be provided as list", "code": 400}}', mimetype='application/json', status=400)
response_msg = '{"_status": "ERR", "_error": {"message": "Data must be provided as list", "code": 400}}'
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), json.loads(response_msg))
# Get current course list
client = MongoClient(mongo_url)
db = client.project2
cursor = db.finance.find(
{
"student_id": student_id
}
)
if cursor.count() < 1:
print 'Abort: Student not exists'
raise StudentNotExists
course_id_list = []
if 'course_list' in cursor[0]:
for course in cursor[0]['course_list']:
course_id_list.append(course['course_id']);
print 'course_id_list = %s' % (course_id_list)
# Process only new courses
count_new_course = 0
count_old_course = 0
for course in courses_to_update:
print "course = %s" % (course)
# Validate course data format
v = Validator(schema_course)
if not v.validate(course):
# return Response('{"_status": "ERR", "_error": {"message": "Invalid data format of course; Please check API", "code": 400}}', mimetype='application/json', status=400)
response_msg = '{"_status": "ERR", "_error": {"message": "Invalid data format of course; Please check API", "code": 400}}'
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), json.loads(response_msg))
# Process only new courses
if course['course_id'] not in course_id_list:
print "\tadd"
result = db.finance.update(
{
"student_id": student_id
},
{
"$push": {
"course_list": course
},
"$currentDate": {"lastModified": True}
}
)
count_new_course += 1
else:
count_old_course += 1
client.close()
# return Response('{"_status": "SUCCESS", "_success": {"message": "'+str(count_new_course)+' course(s) added, '+str(count_old_course)+' course(s) already exist(s)", "code": 200}}', mimetype='application/json', status=200)
response_msg = '{"_status": "SUCCESS", "_success": {"message": "'+str(count_new_course)+' course(s) added, '+str(count_old_course)+' course(s) already exist(s)", "code": 200}}'
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), json.loads(response_msg))
except Exception as e:
if type(e).__name__ == 'ConnectionError':
print 'Error: Cannot connect to mongodb'
raise MongoDbUnavailable
else:
print 'Error: %s when adding course for %s' % (type(e).__name__, student_id)
raise e
@app.route("/private/finance/<student_id>/courses", methods = ['PUT'])
def update_course_list(student_id):
print 'Recieved PUT courses request'
try:
courses_to_update = request.get_json(force = True)
print 'courses_to_update(type=%s) = %s' % (type(courses_to_update).__name__, courses_to_update)
# If data is not array, do not process
if type(courses_to_update).__name__ != 'list':
# return Response('{"_status": "ERR", "_error": {"message": "Data must be provided as list", "code": 400}}', mimetype='application/json', status=400)
response_msg = '{"_status": "ERR", "_error": {"message": "Data must be provided as list", "code": 400}}'
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), json.loads(response_msg))
# Get current course list
client = MongoClient(mongo_url)
db = client.project2
cursor = db.finance.find(
{
"student_id": student_id
}
)
if cursor.count() < 1:
print 'Abort: Student not exists'
raise StudentNotExists
course_id_list = []
print
if 'course_list' in cursor[0]:
for course in cursor[0]['course_list']:
course_id_list.append(course['course_id']);
print 'course_id_list = %s' % (course_id_list)
# Process only existing courses
count_new_course = 0
count_old_course = 0
for course in courses_to_update:
print "course = %s" % (course)
# Validate course data format
v = Validator(schema_course)
if not v.validate(course):
# return Response('{"_status": "ERR", "_error": {"message": "Invalid data format of course; Please check API", "code": 400}}', mimetype='application/json', status=400)
response_msg = '{"_status": "ERR", "_error": {"message": "Invalid data format of course; Please check API", "code": 400}}'
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), json.loads(response_msg))
# Process only existing courses
if course['course_id'] in course_id_list:
print "\tupdate"
result = db.finance.update(
{
"student_id": student_id,
"course_list.course_id": course['course_id']
},
{
"$set": {
"course_list.$": course
},
"$currentDate": {"lastModified": True}
}
)
count_old_course += 1
else:
count_new_course += 1
client.close()
# return Response('{"_status": "SUCCESS", "_success": {"message": "'+str(count_old_course)+' course(s) updated, '+str(count_new_course)+' course(s) do(es) not exist", "code": 200}}', mimetype='application/json', status=200)
response_msg = '{"_status": "SUCCESS", "_success": {"message": "'+str(count_old_course)+' course(s) updated, '+str(count_new_course)+' course(s) do(es) not exist", "code": 200}}'
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), json.loads(response_msg))
except Exception as e:
if type(e).__name__ == 'ConnectionError':
print 'Error: Cannot connect to mongodb'
raise MongoDbUnavailable
else:
print 'Error: %s when adding course for %s' % (type(e).__name__, student_id)
raise e
@app.route("/private/finance/<student_id>/courses", methods = ['DELETE'])
def delete_from_course_list(student_id):
print 'Recieved DELETE courses request'
try:
courses_to_update = request.get_json(force = True)
print 'courses_to_update(type=%s) = %s' % (type(courses_to_update).__name__, courses_to_update)
# If data is not array, do not process
if type(courses_to_update).__name__ != 'list':
# return Response('{"_status": "ERR", "_error": {"message": "Data must be provided as list", "code": 400}}', mimetype='application/json', status=400)
response_msg = '{"_status": "ERR", "_error": {"message": "Data must be provided as list", "code": 400}}'
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), json.loads(response_msg))
# Get current course list
client = MongoClient(mongo_url)
db = client.project2
cursor = db.finance.find(
{
"student_id": student_id
}
)
if cursor.count() < 1:
print 'Abort: Student not exists'
raise StudentNotExists
course_id_list = []
print
if 'course_list' in cursor[0]:
for course in cursor[0]['course_list']:
course_id_list.append(course['course_id']);
print 'course_id_list = %s' % (course_id_list)
# Process only existing courses
count_new_course = 0
count_old_course = 0
for course_id in courses_to_update:
print "course_id = %s" % (course_id)
# Process only existing courses
if course_id in course_id_list:
print "\tremove"
result = db.finance.update(
{
"student_id": student_id,
"course_list.course_id": course_id
},
{
"$unset": {
"course_list.$": ""
},
"$currentDate": {"lastModified": True}
}
)
count_old_course += 1
else:
count_new_course += 1
client.close()
# return Response('{"_status": "SUCCESS", "_success": {"message": "'+str(count_old_course)+' course(s) updated, '+str(count_new_course)+' course(s) do(es) not exist", "code": 200}}', mimetype='application/json', status=200)
response_msg = '{"_status": "SUCCESS", "_success": {"message": "'+str(count_old_course)+' course(s) removed, '+str(count_new_course)+' course(s) do(es) not exist", "code": 200}}'
return send_to_response_queue(request.headers.get('Response-url'), request.headers.get('Request-id'), json.loads(response_msg))
except Exception as e:
if type(e).__name__ == 'ConnectionError':
print 'Error: Cannot connect to mongodb'
raise MongoDbUnavailable
else:
print 'Error: %s when adding course for %s' % (type(e).__name__, student_id)
raise e
def send_to_response_queue(resp_queue_url, req_id, json_object):
    """Publish a service result to the caller's SQS response queue.

    The payload travels in message attributes: 'ReturnValue' carries
    the JSON-encoded result and 'RequestID' echoes the caller's id so
    the consumer can correlate replies. The message body itself is a
    fixed placeholder string.

    Returns the MD5 digest of the message body reported by SQS.
    """
    print(resp_queue_url)
    attributes = {
        'ReturnValue': {
            'StringValue': json.dumps(json_object),
            'DataType': 'String'
        },
        'RequestID': {
            'StringValue': req_id,
            'DataType': 'String'
        }
    }
    sqs_reply = sqs_client.send_message(
        QueueUrl = resp_queue_url,
        MessageBody = 'boto3',
        MessageAttributes = attributes)
    return sqs_reply['MD5OfMessageBody']
def start_eve_process(args):
    # Launch the eve backend (finance_eve.py) as a child process and keep
    # its Popen handle on the module-level `ff` object so that
    # stop_eve_process can terminate it later.
    print "starting finance eve process..."
    ff.eve_process = subprocess.Popen(args)
def stop_eve_process(args):
    # Send SIGTERM to the eve child started by start_eve_process.
    # NOTE(review): `args` is unused - presumably kept for signature
    # symmetry with start_eve_process; confirm before removing.
    print "stopping finance eve process..."
    os.kill(ff.eve_process.pid, signal.SIGTERM)
if __name__ == "__main__":
if(len(sys.argv) >= 3):
sqs_client = boto3.client('sqs')
host = sys.argv[1]
eve_port = str((int(sys.argv[2]) + 10000))
args = ['python', 'finance_eve.py', host, eve_port]
start_eve_process(args)
eve_base_url = 'http://' + host + ':' + eve_port + '/private/finance'
app.run(host=host, port=int(sys.argv[2]))
| 39.110112
| 225
| 0.687888
| 2,272
| 17,404
| 5.048415
| 0.099032
| 0.047602
| 0.053357
| 0.031473
| 0.83313
| 0.807672
| 0.784656
| 0.770881
| 0.76748
| 0.764342
| 0
| 0.011154
| 0.160308
| 17,404
| 444
| 226
| 39.198198
| 0.77371
| 0.18105
| 0
| 0.591667
| 0
| 0.013889
| 0.263169
| 0.026657
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.008333
| 0.022222
| null | null | 0.136111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2d3b9e0c4b487bd1a6fd0b551952044c09b3bc70
| 23,803
|
py
|
Python
|
sdk/compute/azure-mgmt-vmwarecloudsimple/azure/mgmt/vmwarecloudsimple/operations/_dedicated_cloud_services_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/compute/azure-mgmt-vmwarecloudsimple/azure/mgmt/vmwarecloudsimple/operations/_dedicated_cloud_services_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/compute/azure-mgmt-vmwarecloudsimple/azure/mgmt/vmwarecloudsimple/operations/_dedicated_cloud_services_operations.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 2
|
2020-05-11T17:10:27.000Z
|
2021-01-02T16:15:35.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class DedicatedCloudServicesOperations(object):
"""DedicatedCloudServicesOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2019-04-01".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Generated operations class: holds the service client plus the
        # serializer/deserializer pair used by every operation method.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Constant API version baked in by the code generator.
        self.api_version = "2019-04-01"
        self.config = config
    def list_by_subscription(
            self, filter=None, top=None, skip_token=None, custom_headers=None, raw=False, **operation_config):
        """Implements list of dedicatedCloudService objects within subscription
        method.

        Returns list of dedicated cloud services within a subscription.

        :param filter: The filter to apply on the list operation
        :type filter: str
        :param top: The maximum number of record sets to return
        :type top: int
        :param skip_token: to be used by nextLink implementation
        :type skip_token: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DedicatedCloudService
        :rtype:
         ~azure.mgmt.vmwarecloudsimple.models.DedicatedCloudServicePaged[~azure.mgmt.vmwarecloudsimple.models.DedicatedCloudService]
        :raises:
         :class:`CSRPErrorException<azure.mgmt.vmwarecloudsimple.models.CSRPErrorException>`
        """
        def prepare_request(next_link=None):
            # First page: build the URL and query string from scratch.
            if not next_link:
                # Construct URL
                url = self.list_by_subscription.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                if skip_token is not None:
                    query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')

            else:
                # Subsequent pages: the service-returned nextLink already
                # embeds every query parameter, so reuse it verbatim.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Fetch one page; anything but HTTP 200 becomes a CSRP error.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                raise models.CSRPErrorException(self._deserialize, response)

            return response

        # Deserialize response: the Paged class pulls pages lazily via
        # internal_paging as the caller iterates.
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.DedicatedCloudServicePaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.VMwareCloudSimple/dedicatedCloudServices'}
    def list_by_resource_group(
            self, resource_group_name, filter=None, top=None, skip_token=None, custom_headers=None, raw=False, **operation_config):
        """Implements list of dedicatedCloudService objects within RG method.

        Returns list of dedicated cloud services within a resource group.

        :param resource_group_name: The name of the resource group
        :type resource_group_name: str
        :param filter: The filter to apply on the list operation
        :type filter: str
        :param top: The maximum number of record sets to return
        :type top: int
        :param skip_token: to be used by nextLink implementation
        :type skip_token: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DedicatedCloudService
        :rtype:
         ~azure.mgmt.vmwarecloudsimple.models.DedicatedCloudServicePaged[~azure.mgmt.vmwarecloudsimple.models.DedicatedCloudService]
        :raises:
         :class:`CSRPErrorException<azure.mgmt.vmwarecloudsimple.models.CSRPErrorException>`
        """
        def prepare_request(next_link=None):
            # First page: build the URL and query string from scratch.
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                if skip_token is not None:
                    query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')

            else:
                # Subsequent pages: the nextLink already embeds every
                # query parameter, so reuse it verbatim.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Fetch one page; anything but HTTP 200 becomes a CSRP error.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                raise models.CSRPErrorException(self._deserialize, response)

            return response

        # Deserialize response: the Paged class pulls pages lazily via
        # internal_paging as the caller iterates.
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.DedicatedCloudServicePaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VMwareCloudSimple/dedicatedCloudServices'}
    def get(
            self, resource_group_name, dedicated_cloud_service_name, custom_headers=None, raw=False, **operation_config):
        """Implements dedicatedCloudService GET method.

        Returns Dedicate Cloud Service.

        :param resource_group_name: The name of the resource group
        :type resource_group_name: str
        :param dedicated_cloud_service_name: dedicated cloud Service name
        :type dedicated_cloud_service_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: DedicatedCloudService or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.vmwarecloudsimple.models.DedicatedCloudService or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`CSRPErrorException<azure.mgmt.vmwarecloudsimple.models.CSRPErrorException>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'dedicatedCloudServiceName': self._serialize.url("dedicated_cloud_service_name", dedicated_cloud_service_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # Anything but HTTP 200 is surfaced as the service's CSRP error.
        if response.status_code not in [200]:
            raise models.CSRPErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DedicatedCloudService', response)

        # raw=True callers get the wire response wrapped around the model.
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VMwareCloudSimple/dedicatedCloudServices/{dedicatedCloudServiceName}'}
    def create_or_update(
            self, resource_group_name, dedicated_cloud_service_name, dedicated_cloud_service_request, custom_headers=None, raw=False, **operation_config):
        """Implements dedicated cloud service PUT method.

        Create dedicate cloud service.

        :param resource_group_name: The name of the resource group
        :type resource_group_name: str
        :param dedicated_cloud_service_name: dedicated cloud Service name
        :type dedicated_cloud_service_name: str
        :param dedicated_cloud_service_request: Create Dedicated Cloud Service
         request
        :type dedicated_cloud_service_request:
         ~azure.mgmt.vmwarecloudsimple.models.DedicatedCloudService
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: DedicatedCloudService or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.vmwarecloudsimple.models.DedicatedCloudService or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`CSRPErrorException<azure.mgmt.vmwarecloudsimple.models.CSRPErrorException>`
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            # The service name is validated client-side against the ARM
            # naming pattern before the request is sent.
            'dedicatedCloudServiceName': self._serialize.url("dedicated_cloud_service_name", dedicated_cloud_service_name, 'str', pattern=r'^[a-zA-Z0-9]([-_.a-zA-Z0-9]*[a-zA-Z0-9])?$')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(dedicated_cloud_service_request, 'DedicatedCloudService')

        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        # Anything but HTTP 200 is surfaced as the service's CSRP error.
        if response.status_code not in [200]:
            raise models.CSRPErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DedicatedCloudService', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VMwareCloudSimple/dedicatedCloudServices/{dedicatedCloudServiceName}'}
    def _delete_initial(
            self, resource_group_name, dedicated_cloud_service_name, custom_headers=None, raw=False, **operation_config):
        # Fires the initial DELETE of the long-running operation; the public
        # delete() wraps this in an LROPoller. Uses delete.metadata for the
        # URL so both share one route definition.
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'dedicatedCloudServiceName': self._serialize.url("dedicated_cloud_service_name", dedicated_cloud_service_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # Only 204 (No Content) is accepted for the initial delete call.
        if response.status_code not in [204]:
            raise models.CSRPErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            header_dict = {
                'Content-Type': 'str',
            }
            client_raw_response.add_headers(header_dict)
            return client_raw_response
    def delete(
            self, resource_group_name, dedicated_cloud_service_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Implements dedicatedCloudService DELETE method.

        Delete dedicate cloud service.

        :param resource_group_name: The name of the resource group
        :type resource_group_name: str
        :param dedicated_cloud_service_name: dedicated cloud service name
        :type dedicated_cloud_service_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises:
         :class:`CSRPErrorException<azure.mgmt.vmwarecloudsimple.models.CSRPErrorException>`
        """
        # Kick off the long-running delete; raw=True so the poller gets
        # the wire response regardless of the caller's `raw` flag.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            dedicated_cloud_service_name=dedicated_cloud_service_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Final-result callback for the poller: nothing to deserialize,
            # but raw callers still get the wrapped response + headers.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                client_raw_response.add_headers({
                    'Content-Type': 'str',
                })
                return client_raw_response

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select the polling strategy: ARM polling by default, none when
        # polling=False, or a caller-provided polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VMwareCloudSimple/dedicatedCloudServices/{dedicatedCloudServiceName}'}
def update(
        self, resource_group_name, dedicated_cloud_service_name, tags=None, custom_headers=None, raw=False, **operation_config):
    """Patch a dedicated cloud service's properties (PATCH).

    :param resource_group_name: The name of the resource group
    :type resource_group_name: str
    :param dedicated_cloud_service_name: dedicated cloud service name
    :type dedicated_cloud_service_name: str
    :param tags: The tags key:value pairs
    :type tags: dict[str, str]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: DedicatedCloudService or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.vmwarecloudsimple.models.DedicatedCloudService or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`CSRPErrorException<azure.mgmt.vmwarecloudsimple.models.CSRPErrorException>`
    """
    patch_payload = models.PatchPayload(tags=tags)

    # Expand the URL template from the operation metadata.
    url = self._client.format_url(
        self.update.metadata['url'],
        subscriptionId=self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        dedicatedCloudServiceName=self._serialize.url("dedicated_cloud_service_name", dedicated_cloud_service_name, 'str'))

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    header_parameters = {
        'Accept': 'application/json',
        'Content-Type': 'application/json; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the payload and send the PATCH request.
    body_content = self._serialize.body(patch_payload, 'PatchPayload')
    request = self._client.patch(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.CSRPErrorException(self._deserialize, response)

    deserialized = self._deserialize('DedicatedCloudService', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VMwareCloudSimple/dedicatedCloudServices/{dedicatedCloudServiceName}'}
| 48.281947
| 198
| 0.676553
| 2,491
| 23,803
| 6.246487
| 0.092332
| 0.030077
| 0.048586
| 0.04338
| 0.846915
| 0.832712
| 0.825771
| 0.819794
| 0.80662
| 0.782069
| 0
| 0.003174
| 0.23224
| 23,803
| 492
| 199
| 48.380081
| 0.848263
| 0.289039
| 0
| 0.706827
| 0
| 0.004016
| 0.165799
| 0.104361
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052209
| false
| 0
| 0.02008
| 0
| 0.140562
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
746cdcc082966ad5d9d0efbb94109a189319ba67
| 10,415
|
py
|
Python
|
mcp/api/server.py
|
fkmclane/MCP
|
e80c3e12c163e9a67870d83340f434c5ed94e075
|
[
"MIT"
] | null | null | null |
mcp/api/server.py
|
fkmclane/MCP
|
e80c3e12c163e9a67870d83340f434c5ed94e075
|
[
"MIT"
] | 15
|
2015-06-13T22:37:25.000Z
|
2018-07-07T12:56:35.000Z
|
mcp/api/server.py
|
lilyinstarlight/MCP
|
e80c3e12c163e9a67870d83340f434c5ed94e075
|
[
"MIT"
] | null | null | null |
import os
import signal
import fooster.web
import fooster.web.query
import mcp.error
import mcp.common.http
import mcp.model.server
class Index(mcp.common.http.AuthHandler):
    """Collection endpoint: list visible servers and create new ones."""
    group = 0

    def do_get(self):
        # Admins see everything; other users only servers granted to them.
        visible = []
        for entry in mcp.model.server.items():
            if entry.server in self.auth.servers or self.auth.admin:
                visible.append(dict(entry))
        return 200, visible

    def do_post(self):
        # Server creation is admin-only.
        if not self.auth.admin:
            raise fooster.web.HTTPError(403)
        try:
            body = self.request.body
            mcp.model.server.create(body['server'], body['source'], body['library'], body['revision'] if 'revision' in body else None, body['port'] if 'port' in body else None, body['autostart'] if 'autostart' in body else None)
            mcp.model.server.wait(body['server'])
        except (KeyError, TypeError):
            raise fooster.web.HTTPError(400)
        except (mcp.error.NoSourceError, mcp.error.NoLibraryError):
            raise fooster.web.HTTPError(400)
        except mcp.error.InvalidServerError:
            raise fooster.web.HTTPError(403)
        except mcp.error.ServerExistsError:
            raise fooster.web.HTTPError(409)
        self.response.headers['Location'] = '/api/server/' + body['server']
        return 201, dict(mcp.model.server.get(body['server']))
class Server(mcp.common.http.AuthHandler):
    """Single-server endpoint: inspect, command, modify, and destroy a server."""
    group = 1

    def _check_access(self):
        """Raise 404 unless the caller is admin or listed in the server's users.

        404 (not 403) is deliberate so non-members cannot probe for server
        existence. The AttributeError branch covers an unknown server name
        (mcp.model.server.get yielding an object without .users).
        """
        try:
            if not self.auth.admin and self.auth.username not in mcp.model.server.get(self.groups[0]).users:
                raise fooster.web.HTTPError(404)
        except AttributeError:
            raise fooster.web.HTTPError(404)

    def do_get(self):
        self._check_access()
        return 200, dict(mcp.model.server.get(self.groups[0]))

    def do_post(self):
        self._check_access()
        try:
            # Forward a console command, then block until the server settles.
            mcp.model.server.send(self.groups[0], self.request.body['command'])
            mcp.model.server.wait(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)
        return 204, None

    def do_put(self):
        self._check_access()
        # Only admins may touch deployment-level fields.
        if not self.auth.admin and ('port' in self.request.body or 'source' in self.request.body or 'library' in self.request.body or 'revision' in self.request.body):
            raise fooster.web.HTTPError(403)
        try:
            mcp.model.server.modify(self.groups[0], self.request.body['library'] if 'library' in self.request.body else None, self.request.body['port'] if 'port' in self.request.body else None, self.request.body['autostart'] if 'autostart' in self.request.body else None, self.request.body['users'] if 'users' in self.request.body else None)
            if 'source' in self.request.body or 'revision' in self.request.body:
                # NOTE(review): grouping inferred — upgrade is assumed to imply a
                # stop/wait; confirm against upstream history.
                mcp.model.server.upgrade(self.groups[0], self.request.body['source'] if 'source' in self.request.body else None, self.request.body['revision'] if 'revision' in self.request.body else None)
                mcp.model.server.stop(self.groups[0])
                mcp.model.server.wait(self.groups[0])
            if 'running' in self.request.body and self.request.body['running']:
                mcp.model.server.start(self.groups[0])
                mcp.model.server.wait(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)
        return 200, dict(mcp.model.server.get(self.groups[0]))

    def do_delete(self):
        self._check_access()
        # Destroying a server remains admin-only even for listed users.
        if not self.auth.admin:
            raise fooster.web.HTTPError(403)
        try:
            mcp.model.server.stop(self.groups[0])
            mcp.model.server.wait(self.groups[0])
            mcp.model.server.destroy(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)
        return 204, None
class Settings(mcp.common.http.PlainAuthHandler):
    """Plain-text endpoint for a server's settings file."""
    group = 1

    def _check_access(self):
        """Raise 404 unless the caller is admin or a listed user of the server.

        AttributeError covers an unknown server (get() result without .users).
        """
        try:
            if not self.auth.admin and self.auth.username not in mcp.model.server.get(self.groups[0]).users:
                raise fooster.web.HTTPError(404)
        except AttributeError:
            raise fooster.web.HTTPError(404)

    def do_get(self):
        self._check_access()
        self.response.headers['Content-Type'] = 'text/plain'
        try:
            return 200, mcp.model.server.settings_get(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)

    def do_put(self):
        self._check_access()
        self.response.headers['Content-Type'] = 'text/plain'
        try:
            # Store the raw request body, then echo the stored settings back.
            mcp.model.server.settings_set(self.groups[0], self.request.body)
            return 200, mcp.model.server.settings_get(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)

    def do_delete(self):
        self._check_access()
        try:
            mcp.model.server.settings_remove(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)
        return 204, None
class Log(mcp.common.http.PlainAuthHandler):
    """Plain-text endpoint for a server's console log, with incremental reads."""
    group = 1

    def do_get(self):
        # 404 unless admin or a listed user; AttributeError means unknown server.
        try:
            if not self.auth.admin and self.auth.username not in mcp.model.server.get(self.groups[0]).users:
                raise fooster.web.HTTPError(404)
        except AttributeError:
            raise fooster.web.HTTPError(404)
        self.response.headers['Content-Type'] = 'text/plain'
        # Optional 'last' query parameter selects an incremental read.
        last = int(self.request.query['last']) if 'last' in self.request.query else None
        try:
            try:
                return 200, mcp.model.server.log_get(self.groups[0], last)
            except mcp.error.LastLogLine:
                return 204, ''
            except mcp.error.NoLogLine:
                # Requested position no longer exists; return the whole log.
                return 201, mcp.model.server.log_get(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)
class Script(mcp.common.http.PlainAuthHandler):
    """Endpoint for a server's management script (get/replace/remove)."""
    group = 1

    def _check_access(self):
        """Raise 404 unless the caller is admin or a listed user of the server.

        AttributeError covers an unknown server (get() result without .users).
        """
        try:
            if not self.auth.admin and self.auth.username not in mcp.model.server.get(self.groups[0]).users:
                raise fooster.web.HTTPError(404)
        except AttributeError:
            raise fooster.web.HTTPError(404)

    def do_get(self):
        self._check_access()
        self.response.headers['Content-Type'] = 'application/x-python-code'
        try:
            return 200, mcp.model.server.script_get(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)

    def do_put(self):
        self._check_access()
        self.response.headers['Content-Type'] = 'application/x-python-code'
        try:
            # Restart cycle: stop, optionally install a new script, start again.
            mcp.model.server.script_stop(self.groups[0])
            mcp.model.server.wait(self.groups[0])
            if self.request.body:
                # NOTE(review): only the script_set is assumed conditional on a
                # non-empty body — confirm against upstream history.
                mcp.model.server.script_set(self.groups[0], self.request.body)
            mcp.model.server.script_start(self.groups[0])
            mcp.model.server.wait(self.groups[0])
            return 200, mcp.model.server.script_get(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)

    def do_delete(self):
        self._check_access()
        try:
            mcp.model.server.script_stop(self.groups[0])
            mcp.model.server.wait(self.groups[0])
            mcp.model.server.script_remove(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)
        return 204, ''
class ScriptLog(mcp.common.http.PlainAuthHandler):
    """Plain-text endpoint for a server's script log, with incremental reads."""
    group = 1

    def do_get(self):
        # 404 unless admin or a listed user; AttributeError means unknown server.
        try:
            if not self.auth.admin and self.auth.username not in mcp.model.server.get(self.groups[0]).users:
                raise fooster.web.HTTPError(404)
        except AttributeError:
            raise fooster.web.HTTPError(404)
        self.response.headers['Content-Type'] = 'text/plain'
        # Optional 'last' query parameter selects an incremental read.
        last = int(self.request.query['last']) if 'last' in self.request.query else None
        try:
            try:
                return 200, mcp.model.server.script_log_get(self.groups[0], last)
            except mcp.error.LastLogLine:
                return 204, ''
            except mcp.error.NoLogLine:
                # Requested position no longer exists; return the whole log.
                return 201, mcp.model.server.script_log_get(self.groups[0])
        except mcp.error.NoServerError:
            raise fooster.web.HTTPError(404)
routes = {'/api/server/' + fooster.web.query.regex: Index, '/api/server/(' + mcp.model.server.servers_allowed + ')' + fooster.web.query.regex: Server, '/api/server/(' + mcp.model.server.servers_allowed + ')/settings' + fooster.web.query.regex: Settings, '/api/server/(' + mcp.model.server.servers_allowed + ')/log' + fooster.web.query.regex: Log, '/api/server/(' + mcp.model.server.servers_allowed + ')/script' + fooster.web.query.regex: Script, '/api/server/(' + mcp.model.server.servers_allowed + ')/script/log' + fooster.web.query.regex: ScriptLog}
| 39.60076
| 551
| 0.631109
| 1,365
| 10,415
| 4.789011
| 0.07619
| 0.064862
| 0.113508
| 0.157871
| 0.869206
| 0.838458
| 0.810005
| 0.775738
| 0.743766
| 0.726939
| 0
| 0.029599
| 0.247432
| 10,415
| 262
| 552
| 39.751908
| 0.804414
| 0
| 0
| 0.781095
| 0
| 0
| 0.050696
| 0.004801
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069652
| false
| 0
| 0.034826
| 0.004975
| 0.253731
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7782f5a03b20520425dfd291307c405b0e8f5ece
| 14,176
|
py
|
Python
|
test/test_domains.py
|
Radico/python-sparkpost
|
0c2953d17449c93bacb381058485d2bae4fad6f2
|
[
"Apache-2.0"
] | null | null | null |
test/test_domains.py
|
Radico/python-sparkpost
|
0c2953d17449c93bacb381058485d2bae4fad6f2
|
[
"Apache-2.0"
] | 1
|
2020-10-26T02:37:59.000Z
|
2020-10-26T02:37:59.000Z
|
test/test_domains.py
|
Radico/python-sparkpost
|
0c2953d17449c93bacb381058485d2bae4fad6f2
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import responses
from sparkpost import SparkPost
from sparkpost.exceptions import SparkPostAPIException
from sparkpost.domains import SendingDomainStatus
@responses.activate
def test_success_list_sending_domains():
    """list() unwraps the API's ``results`` array into a plain list."""
    payload = """{
        "results": [
            {
                "domain": "example1.com",
                "tracking_domain": "click.example1.com",
                "status": {
                    "ownership_verified": true,
                    "spf_status": "unverified",
                    "abuse_at_status": "unverified",
                    "dkim_status": "valid",
                    "cname_status": "valid",
                    "mx_status": "unverified",
                    "compliance_status": "valid",
                    "postmaster_at_status": "unverified",
                    "verification_mailbox_status": "valid",
                    "verification_mailbox": "susan.calvin"
                },
                "shared_with_subaccounts": false,
                "is_default_bounce_domain": false
            }
        ]
    }"""
    responses.add(
        responses.GET,
        'https://api.sparkpost.com/api/v1/sending-domains',
        body=payload,
        status=200,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    domains = client.sending_domains.list()
    assert len(domains) == 1
    assert domains[0]['domain'] == 'example1.com'
@responses.activate
def test_success_list_sending_domains_with_filters():
    """list() forwards filter kwargs as query parameters."""
    payload = """{
        "results": [
            {
                "domain": "example1.com",
                "tracking_domain": "click.example1.com",
                "status": {
                    "ownership_verified": true,
                    "spf_status": "unverified",
                    "abuse_at_status": "unverified",
                    "dkim_status": "valid",
                    "cname_status": "valid",
                    "mx_status": "unverified",
                    "compliance_status": "valid",
                    "postmaster_at_status": "unverified",
                    "verification_mailbox_status": "valid",
                    "verification_mailbox": "susan.calvin"
                },
                "shared_with_subaccounts": false,
                "is_default_bounce_domain": false
            }
        ]
    }"""
    responses.add(
        responses.GET,
        'https://api.sparkpost.com/api/v1/sending-domains?dkim_status=valid',
        body=payload,
        status=200,
        content_type='application/json',
        match_querystring=True
    )
    client = SparkPost('fake-key')
    domains = client.sending_domains.list(dkim_status=SendingDomainStatus.VALID)
    assert len(domains) == 1
@responses.activate
def test_fail_list_sending_domains():
    """A 5xx response surfaces as SparkPostAPIException."""
    responses.add(
        responses.GET,
        'https://api.sparkpost.com/api/v1/sending-domains',
        body="""
        {"errors": [{"message": "You failed", "description": "More Info"}]}
        """,
        status=500,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    with pytest.raises(SparkPostAPIException):
        client.sending_domains.list()
@responses.activate
def test_success_get_sending_domain():
    """get() returns the parsed single-domain payload."""
    payload = """{
        "results": {
            "tracking_domain": "click.example1.com",
            "status": {
                "ownership_verified": false,
                "spf_status": "unverified",
                "abuse_at_status": "unverified",
                "dkim_status": "unverified",
                "cname_status": "unverified",
                "mx_status": "pending",
                "compliance_status": "pending",
                "postmaster_at_status": "unverified",
                "verification_mailbox_status": "unverified"
            },
            "dkim": {
                "headers": "from:to:subject:date",
                "public": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC+W6scd3XWwvC/hPRksfDYFi3ztgyS9OSqnnjtNQeDdTSD1DRx/xFar2wjmzxp2+SnJ5pspaF77VZveN3P/HVmXZVghr3asoV9WBx/uW1nDIUxU35L4juXiTwsMAbgMyh3NqIKTNKyMDy4P8vpEhtH1iv/BrwMdBjHDVCycB8WnwIDAQAB",
                "selector": "hello_selector"
            },
            "shared_with_subaccounts": false,
            "is_default_bounce_domain": false
        }
    }"""
    responses.add(
        responses.GET,
        'https://api.sparkpost.com/api/v1/sending-domains/example1.com',
        body=payload,
        status=200,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    fetched = client.sending_domains.get('example1.com')
    assert fetched is not None
@responses.activate
def test_not_found_get_sending_domain():
    """A 404 on get() surfaces as SparkPostAPIException."""
    responses.add(
        responses.GET,
        'https://api.sparkpost.com/api/v1/sending-domains/foo.com',
        body="""{"errors": [{"message": "Resource could not be found"}]}""",
        status=404,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    with pytest.raises(SparkPostAPIException):
        client.sending_domains.get('foo.com')
@responses.activate
def test_success_delete_sending_domain():
    """A 204 on delete() is reported as True."""
    responses.add(
        responses.DELETE,
        'https://api.sparkpost.com/api/v1/sending-domains/example1.com',
        status=204,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    outcome = client.sending_domains.delete('example1.com')
    assert outcome is True
@responses.activate
def test_not_found_delete_sending_domain():
    """A 404 on delete() surfaces as SparkPostAPIException."""
    responses.add(
        responses.DELETE,
        'https://api.sparkpost.com/api/v1/sending-domains/foo.com',
        body="""{"errors": [{"message": "Resource could not be found"}]}""",
        status=404,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    with pytest.raises(SparkPostAPIException):
        client.sending_domains.delete('foo.com')
@responses.activate
def test_success_update_sending_domain():
    """update() with non-DKIM fields returns the API result dict."""
    payload = """{
        "results": {
            "message": "Successfully Updated Domain.",
            "domain": "example1.com"
        }
    }"""
    responses.add(
        responses.PUT,
        'https://api.sparkpost.com/api/v1/sending-domains/example1.com',
        body=payload,
        status=200,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    outcome = client.sending_domains.update(
        'example1.com',
        tracking_domain='tr.example1.com',
        shared_with_subaccounts=True,
        is_default_bounce_domain=True)
    assert outcome is not None
    assert outcome['message'] == 'Successfully Updated Domain.'
@responses.activate
def test_success_update_dkim_sending_domain():
    """update() with a complete DKIM spec (private, public, selector) succeeds."""
    payload = """{
        "results": {
            "message": "Successfully Updated Domain.",
            "domain": "example1.com"
        }
    }"""
    responses.add(
        responses.PUT,
        'https://api.sparkpost.com/api/v1/sending-domains/example1.com',
        body=payload,
        status=200,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    outcome = client.sending_domains.update(
        'example1.com',
        dkim_private='foo',
        dkim_public='bar',
        dkim_selector='baz')
    assert outcome is not None
    assert outcome['message'] == 'Successfully Updated Domain.'
@responses.activate
def test_failure_update_dkim_sending_domain():
    """update() rejects a partial DKIM spec (private+public without selector).

    No HTTP mock is registered: validation raises ValueError before any
    request is made.
    """
    with pytest.raises(ValueError):
        sp = SparkPost('fake-key')
        # Fixed: the original bound the return value to an unused `results`
        # local; the call never returns because it raises.
        sp.sending_domains.update('example1.com',
                                  dkim_private='foo',
                                  dkim_public='bar')
@responses.activate
def test_success_create_sending_domain():
    """create() returns the API result dict including the new domain."""
    payload = """{
        "results": {
            "message": "Successfully Created domain.",
            "domain": "example1.com",
            "dkim": {
                "public": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC+W6scd3XWwvC/hPRksfDYFi3ztgyS9OSqnnjtNQeDdTSD1DRx/xFar2wjmzxp2+SnJ5pspaF77VZveN3P/HVmXZVghr3asoV9WBx/uW1nDIUxU35L4juXiTwsMAbgMyh3NqIKTNKyMDy4P8vpEhtH1iv/BrwMdBjHDVCycB8WnwIDAQAB",
                "selector": "scph0316",
                "signing_domain": "example1.com",
                "headers": "from:to:subject:date"
            }
        }
    }"""
    responses.add(
        responses.POST,
        'https://api.sparkpost.com/api/v1/sending-domains',
        body=payload,
        status=200,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    outcome = client.sending_domains.create(
        'example1.com',
        tracking_domain='click.example1.com',
        shared_with_subaccounts=False)
    assert outcome is not None
    assert outcome['domain'] == 'example1.com'
@responses.activate
def test_failure_create_sending_domain_already_exists():
    """A 409 conflict on create() surfaces as SparkPostAPIException."""
    payload = """{
        "errors": [
            {
                "message": "resource conflict",
                "description": "Sending Domain <example1.com> already registered",
                "code": "1602"
            }
        ]
    }"""
    responses.add(
        responses.POST,
        'https://api.sparkpost.com/api/v1/sending-domains',
        body=payload,
        status=409,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    with pytest.raises(SparkPostAPIException):
        client.sending_domains.create('example1.com')
@responses.activate
def test_success_create_sending_domain_with_dkim():
    """create() with a complete explicit DKIM spec succeeds."""
    payload = """{
        "results": {
            "message": "Successfully Created domain.",
            "domain": "example1.com",
            "dkim": {
                "public": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC+W6scd3XWwvC/hPRksfDYFi3ztgyS9OSqnnjtNQeDdTSD1DRx/xFar2wjmzxp2+SnJ5pspaF77VZveN3P/HVmXZVghr3asoV9WBx/uW1nDIUxU35L4juXiTwsMAbgMyh3NqIKTNKyMDy4P8vpEhtH1iv/BrwMdBjHDVCycB8WnwIDAQAB",
                "selector": "scph0316",
                "signing_domain": "example1.com",
                "headers": "from:to:subject:date"
            }
        }
    }"""
    responses.add(
        responses.POST,
        'https://api.sparkpost.com/api/v1/sending-domains',
        body=payload,
        status=200,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    outcome = client.sending_domains.create(
        'example1.com',
        dkim_private='foo',
        dkim_public='bar',
        dkim_selector='baz',
        generate_dkim=False)
    assert outcome is not None
    assert outcome['domain'] == 'example1.com'
@responses.activate
def test_failure_create_sending_domain_with_dkim():
    """create() rejects a partial DKIM spec when generate_dkim is False.

    No HTTP mock is registered: validation raises ValueError before any
    request is made.
    """
    with pytest.raises(ValueError):
        sp = SparkPost('fake-key')
        # Fixed: the original bound the return value to an unused `results`
        # local; the call never returns because it raises.
        sp.sending_domains.create('example1.com',
                                  dkim_private='foo',
                                  dkim_public='bar',
                                  generate_dkim=False)
@responses.activate
def test_success_list_tracking_domains():
    """list() returns every tracking domain from the results array."""
    payload = """{
        "results": [
            {
                "port": 443,
                "domain": "example.domain.com",
                "secure": true,
                "default": true,
                "status": {
                    "verified": false,
                    "cname_status": "pending",
                    "compliance_status": "pending"
                }
            },
            {
                "port": 80,
                "domain": "example2.domain.com",
                "secure": false,
                "default": false,
                "status": {
                    "verified": true,
                    "cname_status": "valid",
                    "compliance_status": "valid"
                },
                "subaccount_id": 215
            }
        ]
    }"""
    responses.add(
        responses.GET,
        'https://api.sparkpost.com/api/v1/tracking-domains',
        body=payload,
        status=200,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    domains = client.tracking_domains.list()
    assert len(domains) == 2
    assert domains[0]['domain'] == 'example.domain.com'
@responses.activate
def test_success_list_tracking_domains_subaccounts():
    """list() joins the subaccounts filter into a comma-separated query."""
    payload = """{
        "results": [
            {
                "port": 80,
                "domain": "example2.domain.com",
                "secure": false,
                "default": false,
                "status": {
                    "verified": true,
                    "cname_status": "valid",
                    "compliance_status": "valid"
                },
                "subaccount_id": 215
            }
        ]
    }"""
    responses.add(
        responses.GET,
        'https://api.sparkpost.com/api/v1/tracking-domains?subaccounts=foo,bar',
        body=payload,
        status=200,
        content_type='application/json',
        match_querystring=True
    )
    client = SparkPost('fake-key')
    domains = client.tracking_domains.list(subaccounts=['foo', 'bar'])
    assert len(domains) == 1
    assert domains[0]['domain'] == 'example2.domain.com'
@responses.activate
def test_fail_list_tracking_domains():
    """A 5xx response surfaces as SparkPostAPIException."""
    responses.add(
        responses.GET,
        'https://api.sparkpost.com/api/v1/tracking-domains',
        body="""
        {"errors": [{"message": "You failed", "description": "More Info"}]}
        """,
        status=500,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    with pytest.raises(SparkPostAPIException):
        client.tracking_domains.list()
@responses.activate
def test_success_get_tracking_domain():
    """get() returns the parsed single tracking-domain payload."""
    payload = """{
        "results": {
            "port": 443,
            "domain": "example.domain.com",
            "secure": true,
            "default": true,
            "status": {
                "verified": false,
                "cname_status": "pending",
                "compliance_status": "pending"
            }
        }
    }"""
    responses.add(
        responses.GET,
        'https://api.sparkpost.com/api/v1/tracking-domains/example1.com',
        body=payload,
        status=200,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    fetched = client.tracking_domains.get('example1.com')
    assert fetched is not None
@responses.activate
def test_not_found_get_tracking_domain():
    """A 404 on get() surfaces as SparkPostAPIException."""
    responses.add(
        responses.GET,
        'https://api.sparkpost.com/api/v1/tracking-domains/foo.com',
        body="""{"errors": [{"message": "Resource could not be found"}]}""",
        status=404,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    with pytest.raises(SparkPostAPIException):
        client.tracking_domains.get('foo.com')
@responses.activate
def test_success_delete_tracking_domain():
    """A 204 on delete() is reported as True."""
    responses.add(
        responses.DELETE,
        'https://api.sparkpost.com/api/v1/tracking-domains/example1.com',
        status=204,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    outcome = client.tracking_domains.delete('example1.com')
    assert outcome is True
@responses.activate
def test_not_found_delete_tracking_domain():
    """A 404 on delete() surfaces as SparkPostAPIException."""
    responses.add(
        responses.DELETE,
        'https://api.sparkpost.com/api/v1/tracking-domains/foo.com',
        body="""{"errors": [{"message": "Resource could not be found"}]}""",
        status=404,
        content_type='application/json'
    )
    client = SparkPost('fake-key')
    with pytest.raises(SparkPostAPIException):
        client.tracking_domains.delete('foo.com')
| 29.719078
| 235
| 0.609128
| 1,386
| 14,176
| 6.062049
| 0.098846
| 0.044513
| 0.049988
| 0.059986
| 0.92966
| 0.911569
| 0.896096
| 0.878838
| 0.816472
| 0.810283
| 0
| 0.021056
| 0.252892
| 14,176
| 476
| 236
| 29.781513
| 0.772259
| 0
| 0
| 0.719626
| 0
| 0.002336
| 0.475522
| 0.091704
| 0
| 0
| 0
| 0
| 0.044393
| 1
| 0.049065
| false
| 0
| 0.011682
| 0
| 0.060748
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bb0f0d157907be1ca8a8dc4a9583c758ad405ae1
| 6,333
|
py
|
Python
|
tests/test_compare_util.py
|
Sanz009/dictcompare
|
78ba15d85b8f9eec29d5d20258366642997cb9c2
|
[
"MIT"
] | null | null | null |
tests/test_compare_util.py
|
Sanz009/dictcompare
|
78ba15d85b8f9eec29d5d20258366642997cb9c2
|
[
"MIT"
] | null | null | null |
tests/test_compare_util.py
|
Sanz009/dictcompare
|
78ba15d85b8f9eec29d5d20258366642997cb9c2
|
[
"MIT"
] | null | null | null |
import unittest
__author__ = 'Sanz009'
from compare.compare_util import compare_values, compare_values_with_case_sensitivity
class CompareUtilsTest(unittest.TestCase):
    """Exercises compare_values / compare_values_with_case_sensitivity on
    scalars, strings, lists, dicts, and nested mixtures."""

    def test_compare_values_int_1_true(self):
        """Equal integers compare equal."""
        a, b = 45, 45
        self.assertEqual(compare_values(a, b), True)

    def test_compare_values_int_2_false(self):
        """Different integers compare unequal."""
        a, b = 45, 450
        self.assertEqual(compare_values(a, b), False)

    def test_compare_values_str_1_true(self):
        """Default comparison is case-insensitive."""
        a = "Stratosphere"
        b = "stratosphere"
        self.assertEqual(compare_values(a, b), True)

    def test_compare_values_with_case_sensitivity_str_1_true(self):
        """Case-sensitive comparison fails where the insensitive one passes."""
        a = "Stratosphere"
        b = "stratosphere"
        self.assertEqual(compare_values(a, b), True)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, False), True)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, True), False)
        self.assertEqual(compare_values(a, b), True)

    def test_compare_value_int_str_mix(self):
        """Comparing a string with an integer is always False."""
        a = 40
        b = "stratosphere"
        self.assertEqual(compare_values(a, b), False)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, True), False)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, False), False)

    def test_compare_values_list_1_true(self):
        """Lists compare order-insensitively; case matters only when requested."""
        a = [1, 2, 3, "a"]
        b = [2, 1, 3, "A"]
        self.assertEqual(compare_values(a, b), True)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, True), False)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, False), True)

    def test_compare_values_list_2_dictionary_items_true(self):
        """Dict elements inside lists match regardless of position."""
        a = [1, 2, 3, "a", {"a": 1, "b": 2}, {"c": 2, "d": 3}]
        b = [2, 1, 3, "A", {"c": 2, "d": 3}, {"a": 1, "b": 2}]
        self.assertEqual(compare_values(a, b), True)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, True), False)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, False), True)

    def test_compare_values_list_3_dictionary_items_false(self):
        """A mismatched dict key makes the lists unequal in every mode."""
        a = [1, 2, 3, "a", {"a": 1, "b": 2}, {"c": 2, "d": 3}]
        b = [2, 1, 3, "A", {"c": 2, "d": 3}, {"f": 1, "b": 2}]
        self.assertEqual(compare_values(a, b), False)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, True), False)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, False), False)

    def test_compare_values_list_4_list_items(self):
        """Nested lists also compare order-insensitively."""
        a = [[1, 2], [2, 3], [3, 4]]
        b = [[3, 2], [2, 1], [4, 3]]
        c = [[1, 2], [2, 3], [3, 44]]
        self.assertEqual(compare_values(a, b), True)
        self.assertEqual(compare_values(a, c), False)

    def test_compare_values_dictionary_1_nested_dict_value(self):
        """Dict values that are themselves dicts are compared per key."""
        a = {1: {"a": 1, "b": 2}, 2: {"c": 2, "d": 3}}
        b = {2: {"c": 2, "d": 3}, 1: {"b": 2, "a": 1}}
        c = {1: {"c": 2, "d": 3}, 2: {"b": 2, "a": 1}}
        self.assertEqual(compare_values(a, b), True)
        self.assertEqual(compare_values(a, c), False)

    def test_compare_values_dictionary_2_list_items(self):
        """Dict values that are lists of dicts compare order-insensitively."""
        a = {1: [{"a": 1, "b": 2}, {"y": [1, 2, 3]}], 2: [{"c": 2, "d": 3}]}
        b = {2: [{"c": 2, "d": 3}], 1: [{"y": [2, 3, 1]}, {"b": 2, "a": 1}]}
        c = {1: [{"c": 2, "d": 3}], 2: [{"b": 2, "a": 1}]}
        self.assertEqual(compare_values(a, b), True)
        self.assertEqual(compare_values(a, c), False)

    def test_compare_values_dictionary_2_mix(self):
        """Deeply nested mixed structures; case sensitivity applies recursively."""
        a = {"a": {"b": {"c": [1, 2, 3], "d": [4, 'a', [1, 2, 3]]}, "e": "END OF B"}, "f": "END OF A"}
        b = {"f": "end OF A", "a": {"e": "end OF B", "b": {"d": [4, 'a', [2, 1, 3]], "c": [3, 2, 1]}}}
        self.assertEqual(compare_values(a, b), True)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, True), False)
        self.assertEqual(compare_values_with_case_sensitivity(a, b, False), True)

    def test_compare_values_bool_1(self):
        """Booleans embedded in nested lists are compared too."""
        a = [[1, True], [2, 3], [3, 4]]
        b = [[3, 2], [True, 1], [4, 3]]
        c = [[1, False], [2, 3], [3, 44]]
        self.assertEqual(compare_values(a, b), True)
        self.assertEqual(compare_values(a, c), False)

    def test_compare_values_dict_5_bool_val(self):
        """Booleans embedded in nested dicts are compared too."""
        a = {1: [{"a": True, "b": 2}, {"y": [1, 2, 3]}], 2: [{"c": 2, "d": 3}]}
        b = {2: [{"c": 2, "d": 3}], 1: [{"y": [2, 3, 1]}, {"b": 2, "a": True}]}
        c = {1: [{"c": 2, "d": 3}], 2: [{"b": 2, "a": True}]}
        self.assertEqual(compare_values(a, b), True)
        self.assertEqual(compare_values(a, c), False)

    def test_compare_values_empty_list_variants(self):
        """Empty lists/dicts at various depths: equal shapes match, others don't."""
        self.assertEqual(compare_values([], []), True)
        self.assertEqual(compare_values([{}], [{}]), True)
        self.assertEqual(compare_values([{}, {}], [{}, {}]), True)
        self.assertEqual(compare_values([{"a": [{}]}], [{"a": [{}]}]), True)
        self.assertEqual(compare_values([{"a": []}], [{"a": []}]), True)
        self.assertEqual(compare_values([], [{}]), False)
        self.assertEqual(compare_values([{}], []), False)
        self.assertEqual(compare_values([{}], [{"a": []}]), False)
        self.assertEqual(compare_values([{"a": []}], [{"a": [1, 2, 3]}]), False)
        self.assertEqual(compare_values([{"a": [{}]}], [{"a": []}]), False)
| 49.093023
| 108
| 0.607611
| 924
| 6,333
| 3.867965
| 0.083333
| 0.210968
| 0.258534
| 0.329043
| 0.849468
| 0.809177
| 0.781198
| 0.739787
| 0.725797
| 0.69474
| 0
| 0.055287
| 0.21459
| 6,333
| 128
| 109
| 49.476563
| 0.663249
| 0.053055
| 0
| 0.43617
| 0
| 0
| 0.031208
| 0
| 0
| 0
| 0
| 0
| 0.446809
| 1
| 0.159574
| false
| 0
| 0.021277
| 0
| 0.191489
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
24d769ea9491cdda7bde500ac0d3477470834712
| 201
|
py
|
Python
|
spiral/cli/templates/generate/project/{{ label }}/core/version.py
|
acdaniells/spiral
|
d78344007969d7c991216901b4a9d3ad7d768587
|
[
"BSD-3-Clause"
] | null | null | null |
spiral/cli/templates/generate/project/{{ label }}/core/version.py
|
acdaniells/spiral
|
d78344007969d7c991216901b4a9d3ad7d768587
|
[
"BSD-3-Clause"
] | 1
|
2020-04-01T18:39:48.000Z
|
2020-04-01T18:39:48.000Z
|
spiral/cli/templates/generate/project/{{ label }}/core/version.py
|
acdaniells/spiral
|
d78344007969d7c991216901b4a9d3ad7d768587
|
[
"BSD-3-Clause"
] | 1
|
2020-04-01T18:36:44.000Z
|
2020-04-01T18:36:44.000Z
|
"""{{ label }} core version module."""
from spiral import get_version as spiral_get_version
VERSION = (0, 0, 1, "alpha", 0)
def get_version(version=VERSION):
    """Return the version for *version* (defaults to this package's VERSION).

    Delegates to :func:`spiral.get_version`; presumably that formats the
    (major, minor, micro, release, serial) tuple into a string — TODO confirm.
    """
    return spiral_get_version(version)
| 20.1
| 52
| 0.716418
| 29
| 201
| 4.758621
| 0.482759
| 0.289855
| 0.369565
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023529
| 0.154229
| 201
| 9
| 53
| 22.333333
| 0.788235
| 0.159204
| 0
| 0
| 0
| 0
| 0.030675
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
24dcc5944b8ca6867f1074e30f9caf61e19bad92
| 42
|
py
|
Python
|
experiments/src/tuning/__init__.py
|
chrislybaer/huggingmolecules
|
210239ac46b467e900a47e8f4520054636744ca6
|
[
"Apache-2.0"
] | 60
|
2021-05-07T16:07:26.000Z
|
2022-03-26T19:23:54.000Z
|
experiments/src/tuning/__init__.py
|
gabegomes/huggingmolecules
|
adc581c97fbc21d9967dd9334afa94b22fb77651
|
[
"Apache-2.0"
] | 11
|
2021-05-07T16:01:35.000Z
|
2022-03-09T13:06:05.000Z
|
experiments/src/tuning/__init__.py
|
gabegomes/huggingmolecules
|
adc581c97fbc21d9967dd9334afa94b22fb77651
|
[
"Apache-2.0"
] | 12
|
2021-05-20T08:02:25.000Z
|
2022-03-10T14:11:36.000Z
|
from .tuning_tune_hyper import tune_hyper
| 21
| 41
| 0.880952
| 7
| 42
| 4.857143
| 0.714286
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
70912141d6b2354545212d05dd4c44ac748248be
| 853
|
py
|
Python
|
quotes.py
|
mkseth4774/ine-guide-to-network-programmability-python-course-files
|
35c49dfcf8e8f1b69435987a00fb9a236b803d9f
|
[
"MIT"
] | null | null | null |
quotes.py
|
mkseth4774/ine-guide-to-network-programmability-python-course-files
|
35c49dfcf8e8f1b69435987a00fb9a236b803d9f
|
[
"MIT"
] | null | null | null |
quotes.py
|
mkseth4774/ine-guide-to-network-programmability-python-course-files
|
35c49dfcf8e8f1b69435987a00fb9a236b803d9f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
This is my file to print the Task #2 Answers...
More information...
-----------------------------------------------------------
"""
# Each pair of prints below emits the same text twice: once from a
# single-quoted literal (escaping ') and once from a double-quoted
# literal (escaping "). The backslash escapes are the point of the
# exercise, so every literal must be kept byte-for-byte.
print('He said, "Quote \ can\'t work as \\\\ I don\'t believe" but \\n does!')
print("He said, \"Quote \ can't work as \\\\ I don't believe\" but \\n does!")
print('"What if \\\\\\ and \\\\n and \'\\\\\\\\\' were all "quoted" here?"')
print("\"What if \\\\\\ and \\\\n and '\\\\\\\\' were all \"quoted\" here?\"")
print('To print the "\\" character we need \'\\\\\' don\'t we?')
print("To print the \"\\\" character we need '\\\\' don't we?")
print('\'He said, "\\\\t" is better than "\\\\\\\\t" but can\'t (won\'t) be as good as "\\\\\\\' I think???\'')
print("'He said, \"\\\\t\" is better than \"\\\\\\\\t\" but can't (won't) be as good as \"\\\\\\' I think???'")
##
## End of file...
| 37.086957
| 111
| 0.483001
| 127
| 853
| 3.244094
| 0.370079
| 0.067961
| 0.106796
| 0.07767
| 0.820388
| 0.820388
| 0.820388
| 0.820388
| 0.820388
| 0.820388
| 0
| 0.002774
| 0.154748
| 853
| 22
| 112
| 38.772727
| 0.568655
| 0.194607
| 0
| 0
| 0
| 0
| 0.534125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
3b3fbb5ffae8628208db76821a64ab4fa799547a
| 5,299
|
py
|
Python
|
LempelZivModified.py
|
defalt18/Deep-Compression
|
f70acd42dbacbb98fb5f2da167a682936b746d61
|
[
"Apache-2.0"
] | 1
|
2019-05-16T11:24:55.000Z
|
2019-05-16T11:24:55.000Z
|
LempelZivModified.py
|
defalt18/Deep-Compression
|
f70acd42dbacbb98fb5f2da167a682936b746d61
|
[
"Apache-2.0"
] | 1
|
2019-06-18T19:39:24.000Z
|
2019-06-18T19:39:24.000Z
|
LempelZivModified.py
|
defalt18/Deep-Compression
|
f70acd42dbacbb98fb5f2da167a682936b746d61
|
[
"Apache-2.0"
] | 1
|
2019-06-18T17:14:24.000Z
|
2019-06-18T17:14:24.000Z
|
#############################
######## Lempel Ziv #########
#############################
import sys
from sys import argv
from struct import *
def compress():
    """Interactively LZW-compress a text file.

    Prompts for an input file name and a code width ``n`` (bits), reads
    the whole file as text, and writes the LZW code stream to
    ``<stem>.lzw`` as big-endian unsigned 16-bit integers.

    Fix over the original: files are opened with ``with`` so they are
    closed even if reading/packing raises.
    """
    input_file = input("Input file: ")
    n = input("No. of bytes: ")
    maximum_table_size = pow(2, int(n))
    with open(input_file) as file:
        data = file.read()
    # Building and initializing the dictionary: one entry per char 0..255.
    dictionary_size = 256
    dictionary = {chr(i): i for i in range(dictionary_size)}
    string = ""                # current longest match
    compressed_data = []       # emitted LZW codes
    # LZW compression: extend the match until it leaves the dictionary,
    # then emit the code of the longest known prefix and register the
    # new phrase.
    for symbol in data:
        string_plus_symbol = string + symbol
        if string_plus_symbol in dictionary:
            string = string_plus_symbol
        else:
            compressed_data.append(dictionary[string])
            if len(dictionary) <= maximum_table_size:
                dictionary[string_plus_symbol] = dictionary_size
                dictionary_size += 1
            string = symbol
    if string in dictionary:
        compressed_data.append(dictionary[string])
    # Store the code stream byte-wise. NOTE(review): '>H' caps codes at
    # 65535; very large dictionaries would overflow — confirm n stays small.
    out = input_file.split(".")[0]
    with open(out + ".lzw", "wb") as output_file:
        for code in compressed_data:
            output_file.write(pack('>H', int(code)))
def decompress():
    """Interactively decompress a ``.lzw`` file produced by :func:`compress`.

    Prompts for the input file name and a code width ``n`` (prompt kept
    for interface parity although the value is unused here), reads the
    big-endian 16-bit code stream, and writes the decoded text to
    ``<stem>_decoded.txt``.

    Fix over the original: files are opened with ``with`` so they are
    closed even on error.
    """
    input_file = input("Input file: ")
    n = input("No. of bytes: ")
    maximum_table_size = pow(2, int(n))  # unused; kept so the prompts match compress()
    compressed_data = []
    # Reading the compressed file two bytes ('>H') at a time.
    with open(input_file, "rb") as file:
        while True:
            rec = file.read(2)
            if len(rec) != 2:
                break
            (code,) = unpack('>H', rec)
            compressed_data.append(code)
    # Building and initializing the dictionary: code -> single char.
    next_code = 256
    decompressed_data = ""
    string = ""
    dictionary_size = 256
    dictionary = dict([(x, chr(x)) for x in range(dictionary_size)])
    # LZW decompression.
    for code in compressed_data:
        if not (code in dictionary):
            # KwKwK case: the code refers to the phrase being defined now.
            dictionary[code] = string + (string[0])
        decompressed_data += dictionary[code]
        if not (len(string) == 0):
            dictionary[next_code] = string + (dictionary[code][0])
            next_code += 1
        string = dictionary[code]
    # Storing the decompressed string into a file.
    out = input_file.split(".")[0]
    with open(out + "_decoded.txt", "w") as output_file:
        for ch in decompressed_data:
            output_file.write(ch)
def modified_compress(data):
    """LZW-compress *data* (bytes) and write the code stream to ``a.lzw``.

    The bytes are decoded as UTF-8 with ``errors="ignore"``, so invalid
    sequences are silently dropped (lossy for arbitrary binary input).
    Codes are written as big-endian unsigned 16-bit integers.

    Fixes over the original: the loop variable no longer shadows the
    ``data`` parameter, and the output file is closed via ``with``.
    """
    text = data.decode("utf-8", errors="ignore")
    # Effectively unbounded table (the original used pow(2, 1000)).
    maximum_table_size = pow(2, 1000)
    # Building and initializing the dictionary: one entry per char 0..255.
    dictionary_size = 256
    dictionary = {chr(i): i for i in range(dictionary_size)}
    string = ""               # current longest match
    compressed_data = []      # emitted LZW codes
    # LZW compression algorithm.
    for symbol in text:
        string_plus_symbol = string + symbol
        if string_plus_symbol in dictionary:
            string = string_plus_symbol
        else:
            compressed_data.append(dictionary[string])
            if len(dictionary) <= maximum_table_size:
                dictionary[string_plus_symbol] = dictionary_size
                dictionary_size += 1
            string = str(symbol)
    if string in dictionary:
        compressed_data.append(dictionary[string])
    # NOTE(review): '>H' caps codes at 65535; long inputs can overflow.
    with open("a.lzw", "wb") as output_file:
        for code in compressed_data:
            output_file.write(pack('>H', int(code)))
def modified_decompress():
    """Decompress ``a.lzw``, write the bytes to ``am_decoded.txt``, return them.

    Reads the big-endian 16-bit LZW code stream from the hard-coded file
    ``a.lzw``, decodes it, prints the resulting bytearray (debug output
    kept for behavioural parity), writes the bytes to ``am_decoded.txt``
    and returns them as ``bytes``.

    Fixes over the original: files are managed with ``with``; the
    quadratic char-by-char string rebuild and the manual bytearray copy
    loop are replaced by direct ``encode()``/``bytearray()`` calls that
    produce identical values; the unused table-size computation is gone.
    """
    input_file = "a.lzw"
    compressed_data = []
    # Reading the compressed file two bytes ('>H') at a time.
    with open(input_file, "rb") as file:
        while True:
            rec = file.read(2)
            if len(rec) != 2:
                break
            (code,) = unpack('>H', rec)
            compressed_data.append(code)
    # Building and initializing the dictionary: code -> single char.
    next_code = 256
    decompressed_data = ""
    string = ""
    dictionary_size = 256
    dictionary = {x: chr(x) for x in range(dictionary_size)}
    # LZW decompression algorithm.
    for code in compressed_data:
        if code not in dictionary:
            # KwKwK case: the code refers to the phrase being defined now.
            dictionary[code] = string + string[0]
        decompressed_data += dictionary[code]
        if len(string) != 0:
            dictionary[next_code] = string + dictionary[code][0]
            next_code += 1
        string = dictionary[code]
    encoded = decompressed_data.encode()
    buffer = bytearray(encoded)
    print(buffer)  # debug print kept: the original emitted it too
    with open("am_decoded.txt", "wb") as output_file:
        output_file.write(bytes(buffer))
    return encoded
| 29.769663
| 68
| 0.584261
| 626
| 5,299
| 4.798722
| 0.166134
| 0.074567
| 0.04261
| 0.0253
| 0.875166
| 0.858522
| 0.858522
| 0.844208
| 0.844208
| 0.821238
| 0
| 0.0129
| 0.297792
| 5,299
| 177
| 69
| 29.937853
| 0.79441
| 0.14682
| 0
| 0.777778
| 0
| 0
| 0.029172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0
| 0.02381
| 0
| 0.063492
| 0.007937
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3b6c344c864041392b2865f04bab59f077b497c6
| 108,780
|
py
|
Python
|
tests.py
|
classner/barrista
|
230ad0ecfdac22aa95b38e5aeedc73fcd625a94a
|
[
"MIT"
] | 76
|
2015-10-07T12:02:33.000Z
|
2019-03-11T04:00:53.000Z
|
tests.py
|
classner/barrista
|
230ad0ecfdac22aa95b38e5aeedc73fcd625a94a
|
[
"MIT"
] | 6
|
2015-10-14T03:40:10.000Z
|
2018-07-31T18:39:02.000Z
|
tests.py
|
classner/barrista
|
230ad0ecfdac22aa95b38e5aeedc73fcd625a94a
|
[
"MIT"
] | 17
|
2015-10-14T00:38:15.000Z
|
2018-08-13T04:01:05.000Z
|
"""Unittests for the barrista project."""
# pylint: disable=F0401, C0330, C0302, C0103, R0201, R0914, R0915, W0212
# pylint: disable=no-name-in-module, no-member
import unittest
import logging
logging.basicConfig(level=logging.WARN)
try:
import cv2 # pylint: disable=W0611
CV2_AVAILABLE = True
except ImportError:
CV2_AVAILABLE = False
class NetSpecificationTestCase(unittest.TestCase):
    """Tests the :py:class:`barrista.design.NetSpecification` class."""

    def test_initialization(self):
        """Test initialization and checks."""
        import barrista.design as design
        # Basic init works.
        _ = design.NetSpecification([[2, 2]])
        _ = design.NetSpecification([[2, 2, 2, 2]])
        # Checks work: inconsistent shapes / predict arguments must raise.
        with self.assertRaises(AssertionError):
            _ = design.NetSpecification([[2, 2, 2, 2], [2, 2]])
        with self.assertRaises(AssertionError):
            _ = design.NetSpecification([[2, 2]],
                                        predict_inputs=['test'])
        with self.assertRaises(AssertionError):
            _ = design.NetSpecification([[2, 2]],
                                        predict_input_shapes=[[2, 2]])
        with self.assertRaises(AssertionError):
            _ = design.NetSpecification([[2, 2]],
                                        predict_inputs=['test'],
                                        predict_input_shapes=[[]])
        # A fully specified net must construct without error.
        _ = design.NetSpecification([[10, 3, 51, 51], [10]],  # noqa
                                    inputs=['data', 'annotations'],
                                    predict_inputs=['data'],
                                    predict_input_shapes=[[10, 3]])

    def test_get_predict_net_specification(self):
        """Test the method ``get_predict_net_specification``."""
        import barrista.design as design
        # Without predict inputs the derivation must fail.
        with self.assertRaises(AssertionError):
            netspec = design.NetSpecification([[2, 2]])
            netspec.get_predict_net_specification()
        netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3]])
        pred_netspec = netspec.get_predict_net_specification()
        # The derived spec keeps name/debug/level/layers but switches
        # stages, phase, and inputs to the predict configuration.
        self.assertEqual(pred_netspec.name, netspec.name)
        self.assertEqual(pred_netspec.debug_info, netspec.debug_info)
        self.assertEqual(pred_netspec.stages, ['predict'])
        self.assertEqual(pred_netspec.level, netspec.level)
        self.assertEqual(pred_netspec.phase, design.Phase.TEST)
        self.assertEqual(pred_netspec.force_backward, False)
        self.assertEqual(pred_netspec.layers, netspec.layers)
        self.assertEqual(pred_netspec.inputs, netspec.predict_inputs)
        self.assertEqual(pred_netspec.input_shape,
                         netspec.predict_input_shapes)

    def test_to_pbuf_message(self):
        """Test the method ``to_pbuf_message``."""
        import barrista.design as design
        from barrista.design import ConvolutionLayer, ReLULayer
        netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 32,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(ReLULayer())
        netspec.layers.extend(layers)
        netspec_msg = netspec.to_pbuf_message()
        # The protobuf message must mirror the spec field by field.
        self.assertEqual(netspec_msg.IsInitialized(), True)
        self.assertEqual(netspec_msg.input, netspec.inputs)
        if hasattr(netspec_msg, 'input_shape'):
            for msgshape, specshape in zip(netspec_msg.input_shape,
                                           netspec.input_shape):
                self.assertEqual(list(msgshape.dim), specshape)
        self.assertEqual(len(netspec_msg.layer), len(netspec.layers))
        self.assertEqual(netspec_msg.state.phase, netspec.phase)
        self.assertEqual(netspec_msg.state.level, netspec.level)
        self.assertEqual(netspec_msg.state.stage, netspec.stages)
        self.assertEqual(netspec_msg.name, netspec.name)
        self.assertEqual(netspec_msg.debug_info, netspec.debug_info)

    def test_prototxt_conversion(self):
        """Test the prototxt conversion methods."""
        import barrista.design as design
        from barrista.design import ConvolutionLayer, ReLULayer
        import tempfile
        netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3, 3, 3]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 32,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(ReLULayer())
        netspec.layers.extend(layers)
        _ = netspec.instantiate()
        # Round-trip through an in-memory prototxt string.
        netspec_rl = design.NetSpecification.from_prototxt(
            netspec.to_prototxt())
        # Since we have the test for `to_pbuf_message`, we can assume the
        # conversion to prototxt works correctly.
        self.assertEqual(netspec_rl.to_prototxt(), netspec.to_prototxt())
        # Test file io.
        with tempfile.NamedTemporaryFile(mode='r',
                                         suffix=".prototxt") as tmpfile:
            netspec.to_prototxt(output_filename=tmpfile.name)
            tmpfile.file.flush()
            netspec_rl = design.NetSpecification.from_prototxt(
                filename=tmpfile.name)
        # Test instantiation of a loaded net.
        _ = netspec_rl.instantiate()  # noqa

    def test_copy(self):
        """Test the method ``copy``."""
        import barrista.design as design
        from barrista.design import ConvolutionLayer, ReLULayer
        netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[2, 3, 2, 2]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 32,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(ReLULayer())
        netspec.layers.extend(layers)
        _ = netspec.instantiate()
        netptext = netspec.to_prototxt()
        netspec_copy = netspec.copy()
        _ = netspec_copy.instantiate()  # noqa
        netcptext = netspec_copy.to_prototxt()
        # A copy must serialize to the exact same prototxt.
        self.assertEqual(netptext, netcptext)

    def test_visualize(self):
        """Test the ``visualize`` function."""
        import barrista.design as design
        # pylint: disable=W0212
        # Skip silently if the drawing backend is unavailable.
        if design._draw is None:
            return
        from barrista.design import ConvolutionLayer, ReLULayer
        netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[2, 3, 2, 2]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 32,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(ReLULayer())
        netspec.layers.extend(layers)
        viz = netspec.visualize()
        # Expect an image-like array (H x W x C).
        self.assertEqual(viz.ndim, 3)

    def test_instantiate(self):
        """Test the method ``instatiate``."""
        import barrista.design as design
        from barrista.design import ConvolutionLayer, ReLULayer
        netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3, 3, 3]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 32,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(ReLULayer())
        netspec.layers.extend(layers)
        _ = netspec.instantiate()  # noqa
class LayerSpecificationTestCase(unittest.TestCase):
    """Test the class :py:class:`barrista.design.LayerSpecification`."""

    def test_instantiation(self):
        """Test instantiation."""
        import barrista.design as design
        import copy
        sspec = design.LayerSpecification()  # noqa
        # Equality must hold for self and for a deep copy...
        self.assertTrue(sspec == sspec)
        cspec = copy.deepcopy(sspec)
        self.assertTrue(sspec == cspec)
        # ...and break as soon as a field differs.
        cspec.include_min_level = 2
        self.assertTrue(sspec != cspec)

    def test_to_pbuf(self):
        """Test protobuf conversion."""
        import barrista.design as design
        layerspec = design.LayerSpecification()
        with self.assertRaises(AssertionError):
            # It is not possible to create an abstract layer without type.
            _ = layerspec.to_pbuf_message(0,  # noqa
                                          None,
                                          ['data'])
        # Check the wiring: first layer bottoms onto the net input...
        layerspec.type = 'convolution'
        pbmessage = layerspec.to_pbuf_message(0,
                                              None,
                                              ['data'])
        self.assertEqual(pbmessage.name, '_layer_0')
        self.assertEqual(pbmessage.top[0], '_layer_0')
        self.assertEqual(pbmessage.bottom[0], 'data')
        # ...and a second layer bottoms onto the first layer's top.
        layerspec2 = design.LayerSpecification()
        layerspec2.type = 'convolution'
        pbmessage2 = layerspec2.to_pbuf_message(1,
                                                layerspec,
                                                ['data'])
        self.assertEqual(pbmessage2.name, '_layer_1')
        self.assertEqual(pbmessage2.top[0], '_layer_1')
        self.assertEqual(pbmessage2.bottom[0], '_layer_0')
class MonitoringTestCase(unittest.TestCase):
"""Test the monitors."""
def test_ProgressIndicator(self):
    """Test the ``ProgressIndicator``."""
    import barrista.design as design
    import numpy as np
    from barrista.design import ConvolutionLayer, ReLULayer
    from barrista.monitoring import ProgressIndicator
    from barrista import solver as _solver
    netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                      inputs=['data', 'annotations'],
                                      predict_inputs=['data'],
                                      predict_input_shapes=[[10, 3, 3, 3]])
    layers = []
    conv_params = {'Convolution_kernel_size': 3,
                   'Convolution_num_output': 32,
                   'Convolution_pad': 1}
    layers.append(ConvolutionLayer(**conv_params))
    layers.append(ReLULayer())
    netspec.layers.extend(layers)
    net = netspec.instantiate()
    # For fit.
    fitpi = ProgressIndicator()
    solver = _solver.SGDSolver(
        base_lr=0.01)
    X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
         'annotations': np.ones((10, 1), dtype='float32')}
    net.fit(10,
            solver,
            X,
            train_callbacks=[fitpi])
    # The bar must have completed after the run (attribute only exists
    # on some progressbar versions).
    if hasattr(fitpi.pbar, 'finished'):
        # progressbar2 compatibility.
        self.assertEqual(fitpi.pbar.finished, True)
    # For predict.
    predpi = ProgressIndicator()
    net.predict(np.zeros((20, 3, 3, 3)),
                test_callbacks=[predpi])
    if hasattr(predpi.pbar, 'finished'):
        self.assertEqual(predpi.pbar.finished, True)
def test_JSONLogger(self):
    """Test the ``JSONLogger``."""
    import tempfile
    import shutil
    import os
    import barrista.design as design
    import numpy as np
    import json
    from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                 SoftmaxWithLossLayer, AccuracyLayer)
    from barrista.monitoring import JSONLogger
    from barrista import solver as _solver
    netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                      inputs=['data', 'annotations'],
                                      phase=design.Phase.TRAIN)
    layers = []
    conv_params = {'Convolution_kernel_size': 3,
                   'Convolution_num_output': 3,
                   'Convolution_pad': 1}
    layers.append(ConvolutionLayer(**conv_params))
    layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                    tops=['out']))
    layers.append(SoftmaxWithLossLayer(
        name='loss',
        bottoms=['out', 'annotations']))
    layers.append(AccuracyLayer(name='accuracy',
                                bottoms=['out', 'annotations']))
    netspec.layers.extend(layers)
    net = netspec.instantiate()
    dirpath = tempfile.mkdtemp()
    # For fit.
    fitlog = JSONLogger(dirpath,
                        'tmp',
                        {'test': ['test_loss',
                                  'test_accuracy'],
                         'train': ['train_loss',
                                   'train_accuracy']})
    X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
         'annotations': np.ones((10, 1), dtype='float32')}
    solver = _solver.SGDSolver(
        base_lr=0.01)
    net.fit(30,
            solver,
            X=X,
            X_val=X,
            test_initialization=True,
            test_interval=10,
            train_callbacks=[fitlog],
            test_callbacks=[fitlog])
    # append: a second logger on the same path must extend the log file.
    fitlog = JSONLogger(dirpath,
                        'tmp',
                        {'test': ['test_loss',
                                  'test_accuracy'],
                         'train': ['train_loss',
                                   'train_accuracy']})
    net.fit(30,
            solver,
            X=X,
            X_val=X,
            test_initialization=True,
            test_interval=10,
            train_callbacks=[fitlog],
            test_callbacks=[fitlog])
    with open(os.path.join(dirpath, 'barrista_tmp.json'), 'r') as inf:
        json_load = json.load(inf)
    # Entry counts after two 30-iteration fits with test_interval=10.
    self.assertIn('train', list(json_load.keys()))
    self.assertIn('test', list(json_load.keys()))
    self.assertEqual(len(json_load['train']), 12)
    self.assertEqual(len(json_load['test']), 16)
    shutil.rmtree(dirpath)
    # Verify values.
    predres = net.predict(X,
                          out_blob_names=['loss', 'accuracy'],
                          allow_train_phase_for_test=True)
    last_test_loss = [dct['test_loss'] for dct in json_load['test']
                      if 'test_loss' in dct.keys() and
                      dct['NumIters'] == 60][0]
    self.assertEqual(last_test_loss, predres['loss'][0])
def test_StaticDataMonitor(self):
    """Test the static data monitor."""
    import barrista.design as design
    import numpy as np
    from barrista.monitoring import StaticDataMonitor
    netspec = design.NetSpecification([[3], [3]],
                                      inputs=['a', 'b'],
                                      phase=design.Phase.TRAIN)
    net = netspec.instantiate()
    tmon = StaticDataMonitor(X={'a': np.array(range(3)),
                                'b': np.array(range(5, 8))})
    tmon_test = StaticDataMonitor(X={'a': np.array(range(3)),
                                     'b': np.array(range(5, 8))})
    kwargs = {'net': net,
              'testnet': net,
              'callback_signal': 'initialize_train'}
    tmon._initialize_train(kwargs)
    assert len(tmon.get_parallel_blob_names()) == 2
    kwargs['callback_signal'] = 'initialize_test'
    tmon_test._initialize_test(kwargs)
    kwargs['callback_signal'] = 'pre_fit'
    tmon._pre_fit(kwargs)
    tmon._pre_train_batch({'net': net})
    assert np.all(net.blobs['a'].data[...] == [0, 1, 2])
    assert np.all(net.blobs['b'].data[...] == [5, 6, 7])
    # Static data: a second batch must see the exact same values.
    tmon._pre_train_batch({'net': net})
    assert np.all(net.blobs['a'].data[...] == [0, 1, 2])
    assert np.all(net.blobs['b'].data[...] == [5, 6, 7])
    kwargs['callback_signal'] = 'pre_test'
    tmon_test._pre_test(kwargs)
    tmon_test._pre_test_batch({'testnet': net})
    assert np.all(net.blobs['a'].data[...] == [0, 1, 2])
    assert np.all(net.blobs['b'].data[...] == [5, 6, 7])
def test_CyclingDataMonitor(self):
    """Test the cycling data monitor."""
    import barrista.design as design
    import numpy as np
    from barrista.monitoring import CyclingDataMonitor
    netspec = design.NetSpecification([[3], [3]],
                                      inputs=['a', 'b'],
                                      phase=design.Phase.TRAIN)
    net = netspec.instantiate()
    # 4 samples feeding blobs of batch size 3, so batches wrap around.
    tmon = CyclingDataMonitor(X={'a': list(range(4)),
                                 'b': np.array(range(5, 9))})
    tmon_test = CyclingDataMonitor(X={'a': list(range(4)),
                                      'b': np.array(range(5, 9))})
    kwargs = {'net': net,
              'testnet': net,
              'callback_signal': 'initialize_train'}
    tmon._initialize_train(kwargs)
    kwargs['callback_signal'] = 'initialize_test'
    # The same monitor instance may not be initialized twice.
    with self.assertRaises(Exception):
        tmon._initialize_test(kwargs)
    tmon_test._initialize(kwargs)
    kwargs['callback_signal'] = 'pre_fit'
    tmon._pre_fit(kwargs)
    tmon._pre_train_batch({'net': net})
    assert np.all(net.blobs['a'].data[...] == [0, 1, 2])
    assert np.all(net.blobs['b'].data[...] == [5, 6, 7])
    # Second batch continues cycling through the 4 samples.
    tmon._pre_train_batch({'net': net})
    assert np.all(net.blobs['a'].data[...] == [3, 0, 1])
    assert np.all(net.blobs['b'].data[...] == [8, 5, 6])
    kwargs['callback_signal'] = 'pre_test'
    tmon_test._pre_test(kwargs)
    tmon_test._pre_test_batch({'testnet': net})
    assert np.all(net.blobs['a'].data[...] == [0, 1, 2])
    assert np.all(net.blobs['b'].data[...] == [5, 6, 7])
def test_CyclingDataMonitor_only_preload(self):
    """Test the cycling data monitor preload capability."""
    import barrista.design as design
    import numpy as np
    from barrista.monitoring import CyclingDataMonitor
    netspec = design.NetSpecification([[3], [3]],
                                      inputs=['a', 'b'],
                                      phase=design.Phase.TRAIN)
    net = netspec.instantiate()
    tmon = CyclingDataMonitor(
        only_preload=['a', 'b'],
        X={'a': list(range(4)),
           'b': np.array(range(5, 9))})
    kwargs = {'net': net,
              'testnet': net,
              'callback_signal': 'initialize_train'}
    tmon._initialize_train(kwargs)
    kwargs['callback_signal'] = 'pre_fit'
    tmon._pre_fit(kwargs)
    kwargs = {'net': net, 'testnet': net, 'callback_signal': 'pre_batch'}
    # With only_preload, data lands in kwargs['data_orig'] instead of
    # being written into the net blobs.
    tmon._pre_train_batch(kwargs)
    assert np.all(kwargs['data_orig']['a'] == [0, 1, 2])
    assert np.all(kwargs['data_orig']['b'] == [5, 6, 7])
    tmon._pre_train_batch(kwargs)
    assert np.all(kwargs['data_orig']['a'] == [3, 0, 1])
    assert np.all(kwargs['data_orig']['b'] == [8, 5, 6])
    tmon._pre_test_batch(kwargs)
    assert np.all(kwargs['data_orig']['a'] == [2, 3, 0])
    assert np.all(kwargs['data_orig']['b'] == [7, 8, 5])
def test_CyclingDataMonitor_augmentation(self):
    """Test the cycling data monitor color data augmentation capability."""
    import barrista.design as design
    import numpy as np
    from barrista.monitoring import CyclingDataMonitor
    netspec = design.NetSpecification([[1, 3, 5, 5], [3]],
                                      inputs=['a', 'b'],
                                      phase=design.Phase.TRAIN)
    net = netspec.instantiate()
    # Two 3-channel images with a single hot pixel each.
    ddta1 = np.zeros((3, 5, 5))
    ddta1[0, 0, 0] = 1.
    ddta2 = np.zeros((3, 5, 5))
    ddta2[1, 1, 1] = 1.
    tmon = CyclingDataMonitor(
        only_preload=['a', 'b'],
        X={'a': [ddta1, ddta2],
           'b': np.array(range(5, 7))},
        color_data_augmentation_sigmas={'a': 0.1})
    kwargs = {'net': net,
              'testnet': net,
              'callback_signal': 'initialize_train'}
    tmon._initialize_train(kwargs)
    # Filling of unspecified sigmas (blob 'b' defaults to 0).
    self.assertEqual(tmon._color_data_augmentation_sigmas,
                     {'a': 0.1, 'b': 0.})
    # Equal weights for the first two components.
    self.assertEqual(len(tmon._color_data_augmentation_weights), 1)
    self.assertAlmostEqual(tmon._color_data_augmentation_weights['a'][0],
                           tmon._color_data_augmentation_weights['a'][1],
                           delta=0.01)
    # Third one zero.
    self.assertEqual(tmon._color_data_augmentation_weights['a'][2], 0.)
    # Check components: orthogonal first two, zeros for third.
    self.assertEqual(len(tmon._color_data_augmentation_components), 1)
    self.assertEqual(np.dot(
        tmon._color_data_augmentation_components['a'][:, 0].T,
        tmon._color_data_augmentation_components['a'][:, 1]),
        0.)
    self.assertTrue(
        np.all(
            tmon._color_data_augmentation_components['a'][2, :2] ==
            [0, 0]))
    self.assertTrue(
        np.all(
            tmon._color_data_augmentation_components['a'][:2, 2] ==
            [0, 0]))
    kwargs['callback_signal'] = 'pre_fit'
    tmon._pre_fit(kwargs)
    kwargs = {'net': net, 'testnet': net, 'callback_signal': 'pre_batch'}
    tmon._pre_train_batch(kwargs)
    # Test layerwise application: the augmentation shifts each channel
    # by a constant, and leaves the zero-variance third channel at zero.
    self.assertTrue(np.all(kwargs['data_orig']['a'][0][2] == 0))
    diff0 = ddta1[0, 0, 0] - kwargs['data_orig']['a'][0][0, 0, 0]
    # pylint: disable=superfluous-parens
    # print(np.max(np.abs(ddta1[0] - kwargs['data_orig']['a'][0][0] -
    #                     diff0)))
    self.assertTrue(np.all(np.isclose(
        ddta1[0] - kwargs['data_orig']['a'][0][0],
        diff0, rtol=1e-04, atol=1e-07)))
    diff1 = ddta1[1, 0, 0] - kwargs['data_orig']['a'][0][1, 0, 0]
    self.assertTrue(np.all(np.isclose(
        ddta1[1] - kwargs['data_orig']['a'][0][1],
        diff1, rtol=1e-04, atol=1e-07)))
    diff2 = ddta1[2, 0, 0] - kwargs['data_orig']['a'][0][2, 0, 0]
    self.assertEqual(diff2, 0.)
    self.assertTrue(np.all(np.isclose(
        ddta1[2] - kwargs['data_orig']['a'][0][2],
        diff2, rtol=1e-04, atol=1e-07)))
def test_CyclingDataMonitor_resizing(self):
    """Test the cycling data monitor resizing capability."""
    import barrista.design as design
    import numpy as np
    from barrista.monitoring import CyclingDataMonitor
    import barrista.monitoring as bm
    if bm._cv2 is None:
        # OpenCV is not available, so skip the test.
        return
    netspec = design.NetSpecification([[3, 3, 10, 10], [3, 3, 5, 5]],
                                      inputs=['a', 'b'],
                                      phase=design.Phase.TRAIN)
    net = netspec.instantiate()
    # 'rc'/'rn' = resize with cubic / nearest interpolation (see
    # input_processing_flags semantics in barrista.monitoring).
    tmon = CyclingDataMonitor(
        X={'a': [np.ones((3, 5, 5))] * 2,
           'b': np.ones((2, 3, 5, 5))},
        input_processing_flags={'a': 'rc', 'b': 'rn'})
    kwargs = {'net': net,
              'testnet': net,
              'callback_signal': 'initialize_train'}
    tmon._initialize_train(kwargs)
    tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
    kwargs = {'net': net, 'testnet': net}
    tmon._pre_train_batch(kwargs)
    # Constant-one inputs must stay all ones after resizing.
    assert np.all(net.blobs['a'].data[...] == 1.)
    assert np.all(net.blobs['b'].data[...] == 1.)
def test_CyclingDataMonitor_padding(self):
    """Test the cycling data monitor padding capability."""
    import barrista.design as design
    import numpy as np
    from barrista.monitoring import CyclingDataMonitor
    netspec = design.NetSpecification([[3, 3, 10, 10], [3, 3, 7, 7]],
                                      inputs=['a', 'b'],
                                      phase=design.Phase.TRAIN)
    net = netspec.instantiate()
    # 'p0'/'p2' = pad with value 0 / value 2 (input_processing_flags).
    tmon = CyclingDataMonitor(
        X={'a': [np.ones((3, 5, 5))] * 2,
           'b': np.ones((2, 3, 5, 5))},
        input_processing_flags={'a': 'p0', 'b': 'p2'})
    kwargs = {'net': net,
              'testnet': net,
              'callback_signal': 'initialize_train'}
    tmon._initialize_train(kwargs)
    tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
    kwargs = {'net': net, 'testnet': net}
    tmon._pre_train_batch(kwargs)
    # 'a': 3 batch * 3 ch * 5*5 ones = 225, zero padding adds nothing.
    assert np.sum(net.blobs['a'].data[...]) == 225.
    # 'b': same 225 ones plus the pad ring filled with 2s (= 432).
    assert np.sum(net.blobs['b'].data[...]) == 225. + 432.
def test_ResizingMonitor(self):
    """Test the resizing monitor."""
    import barrista.design as design
    import barrista.solver as sv
    import numpy as np
    from barrista.monitoring import CyclingDataMonitor, ResizingMonitor
    netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
                                      inputs=['a', 'b'],
                                      phase=design.Phase.TRAIN)
    netspec.layers.append(design.ConvolutionLayer(
        Convolution_kernel_size=3,
        Convolution_num_output=1))
    net = netspec.instantiate()
    # Samples of different spatial sizes; the ResizingMonitor must
    # reshape the net and pad to a multiple of 2 with the fill values.
    dmon = CyclingDataMonitor(
        only_preload=['a', 'b'],
        X={'a': [np.zeros((3, 6, 6)), np.zeros((3, 7, 9))],
           'b': [np.ones((1, 6, 6)), np.ones((1, 7, 9))]})
    tmon = ResizingMonitor(
        blobinfos={'a': 1, 'b': 2},
        net_input_size_adjustment_multiple_of=2
    )
    kwargs = {'net': net,
              'testnet': net,
              'callback_signal': 'initialize_train'}
    tmon._initialize_train(kwargs)
    dmon._initialize_train(kwargs)
    dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
    tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
    kwargs = {'net': net, 'testnet': net}
    dmon._pre_train_batch(kwargs)
    tmon._pre_train_batch(kwargs)
    self.assertEqual(np.sum(net.blobs['a'].data[...]), 39.)
    self.assertEqual(np.sum(net.blobs['b'].data[...]), 36. + 26.)
    dmon._pre_train_batch(kwargs)
    tmon._pre_train_batch(kwargs)
    # Second sample is already 7x9: no padding needed, shape adjusted.
    self.assertEqual(np.sum(net.blobs['a'].data[...]), 0.)
    self.assertEqual(net.blobs['a'].data.shape, (1, 3, 7, 9))
    self.assertEqual(np.sum(net.blobs['b'].data[...]), 63.)
    self.assertEqual(net.blobs['b'].data.shape, (1, 1, 7, 9))
    dmon._pre_test_batch(kwargs)
    tmon._pre_test_batch(kwargs)
    self.assertEqual(np.sum(net.blobs['a'].data[...]), 39.)
    self.assertEqual(np.sum(net.blobs['b'].data[...]), 36. + 26.)
    # Check that the parallel filling works.
    dmon = CyclingDataMonitor(
        only_preload=['a', 'b'],
        X={'a': [np.zeros((3, 6, 6)), np.zeros((3, 7, 9))],
           'b': [np.ones((1, 6, 6)), np.ones((1, 7, 9))]})
    tmon = ResizingMonitor(
        blobinfos={'a': 1, 'b': 2},
        net_input_size_adjustment_multiple_of=2
    )
    net.fit(3, sv.SGDSolver(base_lr=0.01), train_callbacks=[dmon, tmon])
    self.assertEqual(np.sum(net.blobs['a'].data[...]), 39.)
    self.assertEqual(np.sum(net.blobs['b'].data[...]), 36. + 26.)
def test_ResizingMonitor_fixed_scale(self):
    """Test the resizing monitor scaling capability."""
    import barrista.design as design
    import numpy as np
    from barrista.monitoring import CyclingDataMonitor, ResizingMonitor
    import barrista.monitoring as bm
    if bm._cv2 is None:
        # OpenCV is not available, so skip the test.
        return
    netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
                                      inputs=['a', 'b'],
                                      phase=design.Phase.TRAIN)
    net = netspec.instantiate()
    dmon = CyclingDataMonitor(
        only_preload=['a', 'b'],
        X={'a': [np.zeros((3, 6, 6)), np.zeros((3, 7, 9))],
           'b': [np.ones((1, 6, 6)), np.ones((1, 7, 9))]})
    # Fixed base_scale=2 with per-blob interpolation methods
    # ('c' = cubic, 'n' = nearest).
    tmon = ResizingMonitor(
        blobinfos={'a': 1, 'b': 2},
        base_scale=2.,
        net_input_size_adjustment_multiple_of=2,
        interp_methods={'a': 'c', 'b': 'n'}
    )
    kwargs = {'net': net,
              'testnet': net,
              'callback_signal': 'initialize_train'}
    tmon._initialize_train(kwargs)
    dmon._initialize_train(kwargs)
    dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
    tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
    kwargs = {'net': net, 'testnet': net}
    dmon._pre_train_batch(kwargs)
    tmon._pre_train_batch(kwargs)
    self.assertEqual(np.sum(net.blobs['a'].data[...]), 75.)
    self.assertEqual(np.sum(net.blobs['b'].data[...]), 36.*4. + 50.)
    dmon._pre_train_batch(kwargs)
    tmon._pre_train_batch(kwargs)
    # 7x9 scaled by 2 then padded up to the next multiple: 15x19.
    self.assertEqual(np.sum(net.blobs['a'].data[...]),
                     (15.*19.-14.*18.)*3.)
    self.assertEqual(net.blobs['a'].data.shape, (1, 3, 15, 19))
    self.assertEqual(np.sum(net.blobs['b'].data[...]),
                     (15.*19.-14.*18.)*2.+14.*18.)
    self.assertEqual(net.blobs['b'].data.shape, (1, 1, 15, 19))
    dmon._pre_test_batch(kwargs)
    tmon._pre_test_batch(kwargs)
    self.assertEqual(np.sum(net.blobs['a'].data[...]), 75.)
    self.assertEqual(np.sum(net.blobs['b'].data[...]), 36.*4. + 50.)
def test_ResizingMonitor_random_scale(self):
"""Test the resizing monitor random scale capability."""
import barrista.design as design
import numpy as np
from barrista.monitoring import CyclingDataMonitor, ResizingMonitor
import barrista.monitoring as bm
if bm._cv2 is None:
# OpenCV is not available, so skip the test.
return
netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
inputs=['a', 'b'],
phase=design.Phase.TRAIN)
net = netspec.instantiate()
dmon = CyclingDataMonitor(
only_preload=['a', 'b'],
X={'a': [np.zeros((3, 6, 6))],
'b': [np.ones((1, 6, 6))]})
tmon = ResizingMonitor(
blobinfos={'a': 1, 'b': 2},
base_scale=2.,
random_change_up_to=0.5,
net_input_size_adjustment_multiple_of=1,
interp_methods={'a':'c', 'b':'n'}
)
kwargs = {'net': net,
'testnet': net,
'callback_signal': 'initialize_train'}
tmon._initialize_train(kwargs)
dmon._initialize_train(kwargs)
dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
kwargs = {'net': net, 'testnet': net}
scales = []
np.random.seed(1)
for _ in range(1000):
dmon._pre_train_batch(kwargs)
tmon._pre_train_batch(kwargs)
scales.append(net.blobs['a'].data.shape[2])
from scipy.stats import chisquare, itemfreq
freq = itemfreq(scales)[:, 1]
_, pvalue = chisquare(freq)
self.assertTrue(pvalue > 0.1)
    def test_RotatingMirroringMonitor(self):
        """Test the rotating mirroring monitor."""
        import barrista.design as design
        import numpy as np
        from barrista.monitoring import (CyclingDataMonitor,
                                         RotatingMirroringMonitor)
        # pylint: disable=W0212
        netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
                                          inputs=['a', 'b'],
                                          phase=design.Phase.TRAIN)
        net = netspec.instantiate()
        # Mark the top-left corner of each sample so the rotation can be
        # verified by checking where the marker ends up.
        adata = np.zeros((3, 5, 5))
        adata[:, 0, 0:3] = 1.
        bdata = np.ones((1, 5, 5))
        bdata[:, 0, 0:3] = 0.
        dmon = CyclingDataMonitor(
            X={'a': [adata],
               'b': [bdata]})
        # Fill value 2 for both blobs; rotations up to 90 degrees.
        tmon = RotatingMirroringMonitor(
            blobinfos={'a': 2, 'b': 2},
            max_rotation_degrees=90.
        )
        # Fixed seed: the expected values below depend on the exact
        # rotation angle drawn for the first batch.
        np.random.seed(2748)
        # Drive the monitor callback protocol by hand; the call order
        # (dmon before tmon) matters.
        kwargs = {'net': net,
                  'testnet': net,
                  'callback_signal': 'initialize_train'}
        tmon._initialize_train(kwargs)
        dmon._initialize_train(kwargs)
        dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
        tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
        kwargs = {'net': net, 'testnet': net}
        dmon._pre_train_batch(kwargs)
        tmon._pre_train_batch(kwargs)
        self.assertEqual(np.sum(net.blobs['a'].data), 54.)
        # The marker moved from the top row to the left column.
        self.assertTrue(np.all(net.blobs['a'].data[:, :, 2:4, 0] == 1.))
        self.assertEqual(np.sum(net.blobs['b'].data), 31.)
        self.assertTrue(np.all(net.blobs['b'].data[:, :, 2:4, 0] == 0.))
    def test_RotatingMirroringMonitor_mirroring(self):
        """Test the rotating mirroring monitor mirroring capability."""
        import barrista.design as design
        import numpy as np
        from barrista.monitoring import (CyclingDataMonitor,
                                         RotatingMirroringMonitor)
        # pylint: disable=W0212
        netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
                                          inputs=['a', 'b'],
                                          phase=design.Phase.TRAIN)
        net = netspec.instantiate()
        # Asymmetric marker in the top-left so horizontal mirroring is
        # observable: after mirroring it appears in the top-right.
        adata = np.zeros((3, 5, 5))
        adata[:, 0, 0:3] = 1.
        bdata = np.ones((1, 5, 5))
        bdata[:, 0, 0:3] = 0.
        dmon = CyclingDataMonitor(
            X={'a': [adata],
               'b': [bdata]})
        # No rotation, only mirroring with probability 0.5.
        tmon = RotatingMirroringMonitor(
            blobinfos={'a': 2, 'b': 2},
            max_rotation_degrees=0.,
            mirror_prob=0.5
        )
        # Fixed seed so the first batch is deterministically mirrored.
        np.random.seed(2748)
        # Drive the monitor callback protocol by hand; the call order
        # (dmon before tmon) matters.
        kwargs = {'net': net,
                  'testnet': net,
                  'callback_signal': 'initialize_train'}
        tmon._initialize_train(kwargs)
        dmon._initialize_train(kwargs)
        dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
        tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
        kwargs = {'net': net, 'testnet': net}
        dmon._pre_train_batch(kwargs)
        tmon._pre_train_batch(kwargs)
        # Mirroring only permutes values: the sums are unchanged.
        self.assertEqual(np.sum(net.blobs['a'].data), np.sum(adata))
        self.assertTrue(np.all(net.blobs['a'].data[:, :, 0, 2:4] == 1.))
        self.assertEqual(np.sum(net.blobs['b'].data), np.sum(bdata))
        self.assertTrue(np.all(net.blobs['b'].data[:, :, 0, 2:4] == 0.))
    def test_RotatingMirroringMonitor_mirroring_swapvalues(self):
        """Test the rotating mirroring monitor mirroring swap capability."""
        import barrista.design as design
        import numpy as np
        from barrista.monitoring import (CyclingDataMonitor,
                                         RotatingMirroringMonitor)
        # pylint: disable=W0212
        netspec = design.NetSpecification([[1, 3, 5, 5], [1, 1, 5, 5]],
                                          inputs=['a', 'b'],
                                          phase=design.Phase.TRAIN)
        net = netspec.instantiate()
        adata = np.zeros((3, 5, 5))
        adata[:, 0, 0:3] = 1.
        bdata = np.ones((1, 5, 5))
        bdata[:, 0, 0:3] = 0.
        dmon = CyclingDataMonitor(
            X={'a': [adata],
               'b': [bdata]})
        # On mirroring, additionally swap annotation values 0<->1 then
        # 1<->2 in channel 1 of 'a', and swap channels 1 and 2 of 'a'.
        tmon = RotatingMirroringMonitor(
            blobinfos={'a': 3, 'b': 3},
            max_rotation_degrees=0.,
            mirror_prob=0.5,
            mirror_value_swaps={'a': {1: [(0, 1), (1, 2)]}},
            mirror_layer_swaps={'a': [(1, 2)]}
        )
        # Fixed seed so the first batch is deterministically mirrored.
        np.random.seed(2748)
        # Drive the monitor callback protocol by hand; the call order
        # (dmon before tmon) matters.
        kwargs = {'net': net,
                  'testnet': net,
                  'callback_signal': 'initialize_train'}
        tmon._initialize_train(kwargs)
        dmon._initialize_train(kwargs)
        dmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
        tmon._pre_fit({'net': net, 'callback_signal': 'pre_fit'})
        kwargs = {'net': net, 'testnet': net}
        dmon._pre_train_batch(kwargs)
        tmon._pre_train_batch(kwargs)
        # Channels 0 and 1 of the result correspond to original channels
        # 0 and 2 (layer swap); channel 2 carries the value-swapped data.
        self.assertEqual(np.sum(net.blobs['a'].data[:, (0, 1), :, :]),
                         np.sum(adata[(0, 2), :, :]))
        self.assertEqual(np.sum(net.blobs['a'].data[:, 2, :, :]),
                         np.sum(adata[1, :, :]+1))
        self.assertTrue(np.all(net.blobs['a'].data[:, (0, 1), 0, 2:4] == 1.))
        self.assertTrue(np.all(net.blobs['a'].data[:, 2, 0, 2:4] == 2.))
        # 'b' has no swaps configured: mirroring only permutes values.
        self.assertEqual(np.sum(net.blobs['b'].data), np.sum(bdata))
        self.assertTrue(np.all(net.blobs['b'].data[:, :, 0, 2:4] == 0.))
    def test_Checkpointer(self):
        """Test the ``Checkpointer``."""
        import tempfile
        import shutil
        import os
        import barrista.design as design
        import numpy as np
        from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                     SoftmaxWithLossLayer)
        from barrista.monitoring import Checkpointer
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                          inputs=['data', 'annotations'],
                                          phase=design.Phase.TRAIN)
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                        tops=['out']))
        layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        # Snapshot into a fresh temporary directory every 10 iterations.
        dirpath = tempfile.mkdtemp()
        chckptr = Checkpointer(dirpath+os.sep, 10)
        X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
             'annotations': np.ones((10, 1), dtype='float32')}
        solver = _solver.SGDSolver(
            base_lr=0.01,
            snapshot_prefix=dirpath+os.sep)
        net.fit(30,
                solver,
                X=X,
                train_callbacks=[chckptr])
        # 30 iterations with interval 10 must have produced the second and
        # third checkpoint files.
        dircontents = os.listdir(dirpath)
        self.assertIn('_iter_2.caffemodel', dircontents)
        self.assertIn('_iter_3.caffemodel', dircontents)
        # Solver-state snapshots only exist on caffe versions exposing
        # the ``snapshot`` method.
        if hasattr(solver._solver, 'snapshot'):
            self.assertIn('_iter_2.solverstate', dircontents)
            self.assertIn('_iter_3.solverstate', dircontents)
        shutil.rmtree(dirpath)
def test_GradientMonitor(self):
"""Test the ``GradientMonitor``."""
try:
import matplotlib.pyplot as plt
_ = plt.figure()
except RuntimeError:
return
except ImportError:
return
import barrista.design as design
import numpy as np
import os
from barrista.design import (ConvolutionLayer, ReLULayer,
SoftmaxWithLossLayer, InnerProductLayer)
from barrista.monitoring import GradientMonitor
from barrista.tools import TemporaryDirectory
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.PROTODETAIL.TRAIN)
layers = []
conv_params = {'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 0,
'Convolution_weight_filler': design.PROTODETAIL.FillerParameter(
type='xavier')}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
layers.append(InnerProductLayer(
InnerProduct_num_output=2,
name='net_out',
InnerProduct_weight_filler=design.PROTODETAIL.FillerParameter(
type='xavier')))
layers.append(SoftmaxWithLossLayer(bottoms=['net_out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
# For fit.
solver = _solver.SGDSolver(
base_lr=0.01)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
X['data'][:, 0, 0, 0] = 1.
with TemporaryDirectory() as tmpdir:
net.fit(100,
solver,
X,
train_callbacks=[GradientMonitor(10, tmpdir + os.sep)])
for idx in range(10):
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'gradient_hists_{}.png'.format(idx))))
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'gradient_magnitude_{}.png'.format(idx))))
net.fit(100,
solver,
X,
train_callbacks=[GradientMonitor(10,
tmpdir + os.sep,
relative=True)])
for idx in range(10):
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'gradient_hists_rel_{}.png'.format(idx))))
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'gradient_magnitude_rel_{}.png'.format(idx))))
def test_ActivationMonitor(self):
"""Test the ``ActivationMonitor``."""
try:
import matplotlib.pyplot as plt
_ = plt.figure()
except RuntimeError:
return
except ImportError:
return
import barrista.design as design
import numpy as np
import os
from barrista.design import (ConvolutionLayer, ReLULayer,
SoftmaxWithLossLayer, InnerProductLayer)
from barrista.monitoring import ActivationMonitor
from barrista.tools import TemporaryDirectory
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.PROTODETAIL.TRAIN)
layers = []
conv_params = {'name': 'conv',
'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 0,
'Convolution_weight_filler': design.PROTODETAIL.FillerParameter(
type='xavier')}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
layers.append(InnerProductLayer(
InnerProduct_num_output=2,
name='net_out',
InnerProduct_weight_filler=design.PROTODETAIL.FillerParameter(
type='xavier')))
layers.append(SoftmaxWithLossLayer(bottoms=['net_out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
# For fit.
solver = _solver.SGDSolver(
base_lr=0.01)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
X['data'][:, 0, 0, 0] = 1.
with TemporaryDirectory() as tmpdir:
net.fit(100,
solver,
X,
train_callbacks=[ActivationMonitor(10, tmpdir + os.sep)])
for idx in range(10):
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'activations_conv_{}.png'.format(idx))))
def test_FilterMonitor(self):
"""Test the ``FilterMonitor``."""
try:
import matplotlib.pyplot as plt
_ = plt.figure()
except RuntimeError:
return
except ImportError:
return
import barrista.design as design
import numpy as np
import os
from barrista.design import (ConvolutionLayer, ReLULayer,
SoftmaxWithLossLayer, InnerProductLayer)
from barrista.monitoring import FilterMonitor
from barrista.tools import TemporaryDirectory
from barrista import solver as _solver
netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
inputs=['data', 'annotations'],
phase=design.PROTODETAIL.TRAIN)
layers = []
conv_params = {'name': 'conv',
'Convolution_kernel_size': 3,
'Convolution_num_output': 32,
'Convolution_pad': 0,
'Convolution_weight_filler': design.PROTODETAIL.FillerParameter(
type='xavier')}
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
layers.append(InnerProductLayer(
InnerProduct_num_output=2,
name='net_out',
InnerProduct_weight_filler=design.PROTODETAIL.FillerParameter(
type='xavier')))
layers.append(SoftmaxWithLossLayer(bottoms=['net_out', 'annotations']))
netspec.layers.extend(layers)
net = netspec.instantiate()
# For fit.
solver = _solver.SGDSolver(
base_lr=0.01)
X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
'annotations': np.ones((10, 1), dtype='float32')}
X['data'][:, 0, 0, 0] = 1.
with TemporaryDirectory() as tmpdir:
net.fit(100,
solver,
X,
train_callbacks=[FilterMonitor(10, tmpdir + os.sep)])
for idx in range(10):
self.assertTrue(os.path.exists(os.path.join(
tmpdir,
'parameters_conv_0_{}.png'.format(idx))))
class NetTestCase(unittest.TestCase):
    """Test the new ``Net`` functions."""
    def test_instantiation(self):
        """Test ``Net`` constructors."""
        import barrista.design as design
        from barrista.design import ConvolutionLayer, ReLULayer, Phase
        from barrista.net import Net
        import tempfile
        netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3, 3, 3]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 32,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(ReLULayer())
        netspec.layers.extend(layers)
        # Round-trip the specification through a prototxt file and load
        # it with the ``Net`` constructor directly.
        with tempfile.NamedTemporaryFile(mode='r',
                                         suffix=".prototxt") as tmpfile:
            netspec.to_prototxt(output_filename=tmpfile.name)
            tmpfile.file.flush()
            net = Net(tmpfile.name, Phase.TEST)
        # In older versions of caffe, the input layer was not visible.
        self.assertTrue(len(net.layers) in [2, 3])
        self.assertEqual(net.blobs[net.inputs[0]].data.shape, (10, 3, 3, 3))
        # The annotation blob shape differs between caffe versions.
        self.assertTrue(net.blobs[net.inputs[1]].data.shape == (10,) or
                        net.blobs[net.inputs[1]].data.shape == (10, 1, 1, 1))
    def test_dual_net_use(self):
        """Test the specification of a prediction net."""
        import numpy as np
        import barrista.design as design
        from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                     SoftmaxWithLossLayer, AccuracyLayer,
                                     SoftmaxLayer)
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3, 3, 3]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                        tops=['out']))
        # Stage-specific layers: softmax only in the predict net, loss
        # and accuracy only in the fit net.
        layers.append(SoftmaxLayer(bottoms=['out'],
                                   include_stages=['predict'],
                                   name='softmax'))
        layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations'],
                                           include_stages=['fit']))
        layers.append(AccuracyLayer(name='accuracy',
                                    bottoms=['out', 'annotations'],
                                    include_stages=['fit']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
             'annotations': np.ones((10, 1), dtype='float32')}
        solver = _solver.SGDSolver(
            base_lr=0.01)
        net.fit(20,
                solver,
                X)
        # The constant-label task is trivially learned in 20 iterations.
        predictions = np.array(net.predict(np.zeros((10, 3, 3, 3))))
        predictions = np.argmax(predictions, axis=1)
        self.assertEqual(np.sum(predictions == 1), 10)
        # Force to use the fit network.
        accy = net.predict(X,
                           use_fit_network=True,
                           allow_train_phase_for_test=True)['accuracy'][0]
        self.assertEqual(accy, 1.0)
    def test_reshape_blob(self):
        """Test the reshaping of a blob across nets."""
        import numpy as np
        import barrista.design as design
        from barrista.design import (ConvolutionLayer,
                                     SoftmaxWithLossLayer, AccuracyLayer,
                                     SoftmaxLayer)
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3], [10, 1, 3, 3]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3, 3, 3]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 1,
                       'name': 'out'}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(SoftmaxLayer(bottoms=['out'],
                                   include_stages=['predict'],
                                   name='softmax'))
        layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations'],
                                           include_stages=['fit']))
        layers.append(AccuracyLayer(name='accuracy',
                                    bottoms=['out', 'annotations'],
                                    include_stages=['fit']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        # Grow the spatial size from 3x3 to 5x5 after instantiation;
        # ``reshape_blob`` propagates this across the fit/predict nets.
        net.reshape_blob('data', 10, 3, 5, 5)
        net.blobs['annotations'].reshape(10, 1, 5, 5)
        X = {'data': np.zeros((10, 3, 5, 5), dtype='float32'),
             'annotations': np.ones((10, 1, 5, 5), dtype='float32')}
        solver = _solver.SGDSolver(
            base_lr=0.01)
        net.fit(20,
                solver,
                X)
        predictions = np.array(net.predict(np.zeros((10, 3, 5, 5))))
        predictions = np.argmax(predictions, axis=1)
        # 10 samples * 5 * 5 spatial positions.
        self.assertEqual(np.sum(predictions == 1), 250)
        # Force to use the fit network.
        accy = net.predict(X,
                           use_fit_network=True,
                           allow_train_phase_for_test=True)['accuracy'][0]
        self.assertEqual(accy, 1.0)
    def test_load_blobs_from(self):
        """Test the loading method."""
        import tempfile
        import shutil
        import os
        import barrista.design as design
        import numpy as np
        from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                     SoftmaxWithLossLayer)
        from barrista.monitoring import Checkpointer
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                          inputs=['data', 'annotations'],
                                          phase=design.Phase.TRAIN)
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(InnerProductLayer(name='outlbf',
                                        InnerProduct_num_output=2,
                                        tops=['out']))
        layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        dirpath = tempfile.mkdtemp()
        chckptr = Checkpointer(dirpath + os.sep, 10)
        X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
             'annotations': np.ones((10, 1), dtype='float32')}
        solver = _solver.SGDSolver(
            base_lr=0.01,
            snapshot_prefix=dirpath+os.sep)
        net.fit(20,
                solver,
                X=X,
                train_callbacks=[chckptr])
        # Clobber the trained weights, then restore them from the last
        # checkpoint and verify they match the saved state again.
        checkp0_data = net.params['_layer_0'][0].data.copy()
        net.params['_layer_0'][0].data[...] = 10.
        assert np.any(net.params['_layer_0'][0].data != checkp0_data)
        net.load_blobs_from(os.path.join(dirpath, '_iter_2.caffemodel'))
        assert np.all(net.params['_layer_0'][0].data == checkp0_data)
        if (hasattr(solver._solver, 'restore') and
                hasattr(solver._solver, 'snapshot')):
            # Check for newer versions of caffe the solver restore method.
            solver._solver.restore(os.path.join(dirpath, '_iter_2.solverstate'))
        shutil.rmtree(dirpath)
    def test_multiinput(self):
        """Test multiinput prediction."""
        import numpy as np
        import barrista.design as design
        from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                     SoftmaxWithLossLayer, AccuracyLayer)
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                          inputs=['data', 'annotations'],
                                          phase=design.Phase.TRAIN)
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                        tops=['out']))
        layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
        layers.append(AccuracyLayer(name='accuracy',
                                    bottoms=['out', 'annotations']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
             'annotations': np.ones((10, 1), dtype='float32')}
        solver = _solver.SGDSolver(
            base_lr=0.01)
        net.fit(20,
                solver,
                X)
        accy = net.predict(X,
                           allow_train_phase_for_test=True)['accuracy'][0]
        self.assertEqual(accy, 1.0)
        # Per-input preprocessing flags: 'rc' (resize) needs OpenCV,
        # 'p0' (pad with 0) and 'n' (none) do not.
        if CV2_AVAILABLE:
            accy = net.predict(X,
                               input_processing_flags={'data': 'rc',
                                                       'annotations': 'n'},
                               allow_train_phase_for_test=True)['accuracy'][0]
            self.assertEqual(accy, 1.0)
        accy = net.predict(X, input_processing_flags={'data': 'p0',
                                                      'annotations': 'n'},
                           allow_train_phase_for_test=True)['accuracy'][0]
        self.assertEqual(accy, 1.0)
    def test_multioutput(self):
        """Test multioutput prediction."""
        import numpy as np
        import barrista.design as design
        from barrista.design import (ConvolutionLayer, EuclideanLossLayer)
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 7, 7], [10, 1, 7, 7]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3, 7, 7]])
        layers = []
        # Two parallel conv branches from 'data', each with its own loss,
        # so prediction yields two output blobs.
        layers.append(ConvolutionLayer(Convolution_kernel_size=3,
                                       Convolution_num_output=1,
                                       Convolution_pad=1,
                                       name='conv1',
                                       tops=['conv1_out']))
        layers.append(ConvolutionLayer(Convolution_kernel_size=3,
                                       Convolution_num_output=1,
                                       Convolution_pad=1,
                                       name='conv2',
                                       tops=['conv2_out'],
                                       bottoms=['data']))
        layers.append(EuclideanLossLayer(name='loss1',
                                         bottoms=['conv1_out', 'annotations'],
                                         include_stages=['fit']))
        layers.append(EuclideanLossLayer(name='loss2',
                                         bottoms=['conv2_out', 'annotations'],
                                         include_stages=['fit']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        X = {'data': np.zeros((10, 3, 7, 7), dtype='float32'),
             'annotations': np.ones((10, 1, 7, 7), dtype='float32')}
        solver = _solver.SGDSolver(
            base_lr=0.01)
        net.fit(20,
                solver,
                X)
        # Output flag 'p0' crops back to the original 3x3 input size,
        # 'n' keeps the network output size (7x7).
        pred = net.predict([np.zeros((3, 3, 3))],
                           input_processing_flags={'data': 'p0'},
                           output_processing_flags={'conv1_out': 'p0',
                                                    'conv2_out': 'n'})
        assert pred['conv1_out'][0].shape == (1, 3, 3)
        assert pred['conv2_out'][0].shape == (1, 7, 7)
        pred = net.predict([np.zeros((3, 3, 3))],
                           input_processing_flags={'data': 'p0'},
                           output_processing_flags={'conv1_out': 'n',
                                                    'conv2_out': 'p0'})
        assert pred['conv1_out'][0].shape == (1, 7, 7)
        assert pred['conv2_out'][0].shape == (1, 3, 3)
    def test_predict_sliding_window(self):
        """Test the ``predict_sliding_window`` method."""
        if not CV2_AVAILABLE:
            return
        import numpy as np
        import barrista.design as design
        from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                     EuclideanLossLayer)
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3, 3, 3]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(InnerProductLayer(InnerProduct_num_output=1,
                                        tops=['out']))
        layers.append(EuclideanLossLayer(name='se',
                                         bottoms=['out', 'annotations'],
                                         include_stages=['fit']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
             'annotations': np.ones((10, 1), dtype='float32')}
        solver = _solver.SGDSolver(
            base_lr=0.01)
        net.fit(20,
                solver,
                X)
        # Rescaling.
        predictions = np.array(net.predict_sliding_window(
            np.zeros((10, 3, 5, 5))))
        self.assertEqual(np.sum(predictions != 0.), 90)
        # Step size.
        predictions = np.array(net.predict_sliding_window(
            np.zeros((10, 3, 5, 5)),
            extraction_step=(1, 2)))
        self.assertEqual(np.sum(predictions != 0.), 90)
        # Without step accounting and border padding, fewer positions
        # receive a prediction.
        predictions = np.array(net.predict_sliding_window(
            np.zeros((10, 3, 5, 5)),
            extraction_step=(1, 2),
            account_for_step=False,
            pad_border=False))
        self.assertEqual(np.sum(predictions != 0.), 60)
        predictions = np.array(net.predict_sliding_window(
            np.zeros((10, 3, 5, 5)),
            extraction_step=(1, 2),
            account_for_step=True,
            pad_border=False))
        self.assertEqual(np.sum(predictions != 0.), 90)
    def test_predict_sliding_window_eq_out(self):
        """Test the ``predict_sliding_window`` method with full size output."""
        if not CV2_AVAILABLE:
            return
        import numpy as np
        import barrista.design as design
        from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                     EuclideanLossLayer)
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3]],
                                          inputs=['data'])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        # Smoke test only: the output has the same size as the input, so
        # both overlap combination modes must run without error.
        _ = np.array(net.predict_sliding_window(
            np.zeros((10, 3, 5, 5))))
        _ = np.array(net.predict_sliding_window(
            np.zeros((10, 3, 5, 5)),
            overlap_combine_max=False))
    def test_predict(self):
        """Test the ``predict`` method."""
        import numpy as np
        import barrista.design as design
        from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                     SoftmaxWithLossLayer, SoftmaxLayer)
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3, 3, 3]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                        tops=['out']))
        layers.append(SoftmaxLayer(bottoms=['out'],
                                   include_stages=['predict'],
                                   name='softmax'))
        layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations'],
                                           include_stages=['fit']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        solver = _solver.SGDSolver(
            base_lr=0.01)
        X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
             'annotations': np.ones((10, 1), dtype='float32')}
        net.fit(20,
                solver,
                X)
        if CV2_AVAILABLE:
            # Rescaling.
            predictions = np.array(net.predict(np.zeros((10, 3, 1, 1)),
                                               input_processing_flags={'data': 'rl'}))
            predictions = np.argmax(predictions, axis=1)
            self.assertEqual(np.sum(predictions == 1), 10)
        # Padding.
        predictions_padded = np.array(
            net.predict(np.zeros((10, 3, 1, 1)),
                        input_processing_flags={'data': 'p0'}))
        predictions = np.argmax(predictions_padded, axis=1)
        self.assertEqual(np.sum(predictions == 1), 10)
        # out_layer_names.
        predictions = np.array(
            net.predict(np.zeros((10, 3, 1, 1)),
                        input_processing_flags={'data': 'p0'},
                        out_blob_names=['out'],
                        input_size_spec=(10, 10)))
        predictions = np.argmax(predictions, axis=1)
        self.assertEqual(np.sum(predictions == 1), 10)
        # Static inputs.
        predictions = np.array(net.predict(
            {'data': np.zeros((10, 3, 3, 3))},
            static_inputs=['data']))
        predictions = np.argmax(predictions, axis=1)
        self.assertEqual(np.sum(predictions == 1), 10)
        # Upscaling.
        _ = np.array(
            net.predict(np.zeros((10, 3, 1, 1)),
                        input_processing_flags={'data': 'p0'},
                        output_processing_flags={'softmax': 'p0'}))
        # Oversample.
        predictions = np.array(net.predict(np.zeros((10, 3, 1, 1)),
                                           oversample=True))
        np.testing.assert_allclose(predictions, predictions_padded, rtol=1e-05)
        if CV2_AVAILABLE:
            predictions = np.array(net.predict(np.zeros((10, 3, 1, 1)),
                                               oversample=True,
                                               before_oversample_resize_to=(5, 5)))
            np.testing.assert_allclose(predictions, predictions_padded, rtol=1e-05)
    def test_predict_upscaling(self):
        """Test the ``predict`` method upscaling capability."""
        if not CV2_AVAILABLE:
            return
        import numpy as np
        import barrista.design as design
        from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                     SoftmaxWithLossLayer, SoftmaxLayer)
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3], [10, 1, 1, 1]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[10, 3, 3, 3]])
        layers = []
        # Unpadded conv: the output is spatially smaller than the input,
        # so the 'p0' output flag has to upscale back.
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 0,
                       'name': 'out'}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(SoftmaxLayer(bottoms=['out'],
                                   include_stages=['predict'],
                                   name='softmax'))
        layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations'],
                                           include_stages=['fit']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        solver = _solver.SGDSolver(
            base_lr=0.01)
        X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
             'annotations': np.ones((10, 1), dtype='float32')}
        net.fit(20,
                solver,
                X)
        # Upscaling.
        predictions_us = np.array(
            net.predict(np.zeros((10, 3, 1, 1)),
                        input_processing_flags={'data': 'p0'},
                        output_processing_flags={'softmax': 'p0'}))
        self.assertTrue(np.all(predictions_us.shape == (10, 3, 1, 1)))
    def test_visualize(self):
        """Test the ``visualize`` function."""
        import barrista.design as design
        # pylint: disable=W0212
        if design._draw is None:
            return
        from barrista.design import ConvolutionLayer, ReLULayer
        netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
                                          inputs=['data', 'annotations'],
                                          predict_inputs=['data'],
                                          predict_input_shapes=[[2, 3, 2, 2]])
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 32,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(ReLULayer())
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        # The visualization is returned as an image array (H x W x C).
        viz = net.visualize()
        self.assertEqual(viz.ndim, 3)
class ToolsTestCase(unittest.TestCase):
    """Test the tools module."""
    def test_pad(self):
        """Test the padding function."""
        import numpy as np
        from barrista import tools
        # A three-channel single-pixel image padded to 3x3: the original
        # value must land in the center of an otherwise zero canvas.
        source = np.ones((3, 1, 1))
        expected = np.zeros((3, 3, 3))
        expected[:, 1, 1] = 1.
        result = tools.pad(source, (3, 3))
        self.assertTrue(np.all(expected == result))
        # With ``get_padding``, the applied margins per axis are returned
        # alongside the padded image.
        result, margins = tools.pad(source, (3, 3), get_padding=True)
        expected = np.zeros((3, 3, 3))
        expected[:, 1, 1] = 1.
        self.assertTrue(np.all(expected == result))
        self.assertEqual(margins, ((0, 0), (1., 1.), (1., 1.)))
class ExampleTestCase(unittest.TestCase):
    """Test that the example runs successfully."""
    def test_running(self):
        """Run it."""
        import subprocess
        import sys
        # Run the showcase example with the interpreter executing this
        # test suite; ``check_call`` raises on a nonzero exit status.
        command = [sys.executable, 'examples/showcase.py']
        subprocess.check_call(command)
class SolverTestCase(unittest.TestCase):
"""Test the tools module."""
    def test_fit(self):
        """Test the fit function."""
        import numpy as np
        import barrista.design as design
        from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                     SoftmaxWithLossLayer, AccuracyLayer)
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                          inputs=['data', 'annotations'],
                                          phase=design.Phase.TRAIN)
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 1}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                        tops=['out']))
        layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
        layers.append(AccuracyLayer(name='accuracy',
                                    bottoms=['out', 'annotations']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
             'annotations': np.ones((10, 1), dtype='float32')}
        # Variant 1: net passed explicitly to ``solver.fit``.
        solver = _solver.SGDSolver(base_lr=0.01)
        solver.fit(20,
                   net=net,
                   X=X)
        accy = net.predict(X,
                           allow_train_phase_for_test=True)['accuracy'][0]
        self.assertEqual(accy, 1.0)
        # Variant 2: net bound to the solver at construction time.
        new_net = netspec.instantiate()
        new_solver = _solver.SGDSolver(net=new_net,
                                       base_lr=0.01)
        new_solver.fit(20,
                       X)
        accy = new_net.predict(X,
                               allow_train_phase_for_test=True)['accuracy'][0]
        self.assertEqual(accy, 1.0)
        # Variant 3: validation runs reusing the fit phase.
        new_net = netspec.instantiate()
        new_solver = _solver.SGDSolver(net=new_net,
                                       base_lr=0.01)
        new_solver.fit(20,
                       X,
                       use_fit_phase_for_validation=True)
        accy = new_net.predict(X,
                               allow_train_phase_for_test=True)['accuracy'][0]
        self.assertEqual(accy, 1.0)
        # Variant 4: with a validation set and periodic testing.
        new_solver.fit(20,
                       X,
                       X_val=X,
                       test_initialization=True,
                       test_interval=10,
                       use_fit_phase_for_validation=True)
        accy = new_net.predict(X,
                               allow_train_phase_for_test=True)['accuracy'][0]
        self.assertEqual(accy, 1.0)
    def test_restore(self):
        """Test the ``restore`` method."""
        import tempfile
        import shutil
        import os
        import barrista.design as design
        import numpy as np
        from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                     SoftmaxWithLossLayer, PROTODETAIL)
        from barrista.monitoring import Checkpointer
        from barrista import solver as _solver
        netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                          inputs=['data', 'annotations'],
                                          phase=design.Phase.TRAIN)
        layers = []
        conv_params = {'Convolution_kernel_size': 3,
                       'Convolution_num_output': 3,
                       'Convolution_pad': 1,
                       'Convolution_weight_filler':
                       PROTODETAIL.FillerParameter(type='xavier')}
        layers.append(ConvolutionLayer(**conv_params))
        layers.append(InnerProductLayer(name='outlbf',
                                        InnerProduct_num_output=2,
                                        tops=['out']))
        layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
        netspec.layers.extend(layers)
        net = netspec.instantiate()
        # Train 30 iterations, snapshotting every 10.
        dirpath = tempfile.mkdtemp()
        chckptr = Checkpointer(dirpath + os.sep, 10)
        X = {'data': np.ones((10, 3, 3, 3), dtype='float32'),
             'annotations': np.ones((10, 1), dtype='float32')}
        solver = _solver.SGDSolver(
            base_lr=0.01,
            snapshot_prefix=dirpath+os.sep)
        net.fit(30,
                solver,
                X=X,
                train_callbacks=[chckptr])
        # pylint: disable=W0212
        if not (hasattr(solver._solver, 'restore') and
                hasattr(solver._solver, 'snapshot')):
            # Older caffe versions have no solver state restore support.
            return
        # Restore the state at iteration 20 into a fresh net/solver and
        # continue training for the remaining 10 iterations: the result
        # must match the original 30-iteration training run exactly.
        newsolver = _solver.SGDSolver(
            base_lr=0.01,
            snapshot_prefix=dirpath+os.sep)
        newnet = netspec.instantiate()
        newsolver.restore(os.path.join(dirpath, '_iter_2.solverstate'),
                          newnet)
        newsolver.fit(10,
                      X=X)
        self.assertTrue(np.all(net.params['_layer_0'][0].data[...] ==
                               newnet.params['_layer_0'][0].data[...]))
        shutil.rmtree(dirpath)
def test_sgd(self):
    """
    Test the stochastic gradient descent.

    Checks solver-class lookup, constructor validation, the generated
    parameter dict (defaults and overrides), rejection of lr policies
    whose extra parameters are missing, and finally that both
    ``solver.fit`` and ``net.fit`` train a toy net to accuracy 1.0.
    """
    import numpy as np
    import barrista.design as design
    from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                 SoftmaxWithLossLayer, AccuracyLayer)
    # Build a minimal network with an accuracy output for evaluation.
    netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                      inputs=['data', 'annotations'],
                                      phase=design.Phase.TRAIN)
    layers = []
    conv_params = {'Convolution_kernel_size': 3,
                   'Convolution_num_output': 3,
                   'Convolution_pad': 1}
    layers.append(ConvolutionLayer(**conv_params))
    layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                    tops=['out']))
    layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
    layers.append(AccuracyLayer(name='accuracy',
                                bottoms=['out', 'annotations']))
    netspec.layers.extend(layers)
    net = netspec.instantiate()
    #######################################################################
    # test sgd solver
    #######################################################################
    from barrista import solver as _solver
    # Lookup by name and by caffe solver type must yield the same class.
    tmp = _solver.Get_solver_class('sgd')
    self.assertTrue(issubclass(tmp, _solver.SGDSolver))
    tmp = _solver.Get_caffe_solver_class(_solver.SolverType.SGD)
    self.assertTrue(issubclass(tmp, _solver.SGDSolver))
    with self.assertRaises(KeyError):
        _ = _solver.Get_solver_class('test')
    # Positional args are rejected; base_lr is mandatory.
    with self.assertRaises(TypeError):
        tmp(2)
    with self.assertRaises(Exception):
        tmp(iter_size=2)
    # Defaults of the generated parameter dict.
    tmp_instance = tmp(base_lr=2)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 1)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit iter_size override.
    params = {'net': net, 'base_lr': 2}
    if 'iter_size' in solver_parameter_dict.keys():
        params['iter_size'] = 2
    tmp_instance = tmp(**params)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit regularization override.
    params['regularization_type'] = 'L1'
    tmp_instance = tmp(**params)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L1')
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Invalid regularization type must be rejected.
    if 'iter_size' in solver_parameter_dict.keys():
        params['iter_size'] = 3
    params['regularization_type'] = '--'
    with self.assertRaises(AssertionError):
        _ = tmp(**params).Get_parameter_dict()
    # lr policies requiring extra parameters fail without them.
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                lr_policy='step').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                lr_policy='xx').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                lr_policy='exp').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                lr_policy='inv').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                lr_policy='multistep').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                lr_policy='poly').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,  # noqa
                base_lr=2,
                lr_policy='sigmoid').Get_parameter_dict()
    # Trivial task: constant inputs with constant labels.
    X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
         'annotations': np.ones((10, 1), dtype='float32')}
    # Train via the solver's fit entry point ...
    solver = tmp(base_lr=0.01)
    solver.fit(20,
               X,
               net=net)
    accy = net.predict(X,
                       allow_train_phase_for_test=True)['accuracy'][0]
    self.assertEqual(accy, 1.0)
    # ... and via the net's fit entry point.
    solver = tmp(net=net,
                 base_lr=0.01)
    X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
         'annotations': np.ones((10, 1), dtype='float32')}
    net.fit(20,
            solver,
            X)
    accy = net.predict(X,
                       allow_train_phase_for_test=True)['accuracy'][0]
    self.assertEqual(accy, 1.0)
def test_nesterov(self):
    """
    Test the nesterov solver.

    Mirrors ``test_sgd`` but additionally checks the ``momentum``
    parameter (default 0.0, explicit override) before training the toy
    net to accuracy 1.0.
    """
    import numpy as np
    import barrista.design as design
    from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                 SoftmaxWithLossLayer, AccuracyLayer)
    # Build a minimal network with an accuracy output for evaluation.
    netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                      inputs=['data', 'annotations'],
                                      phase=design.Phase.TRAIN)
    layers = []
    conv_params = {'Convolution_kernel_size': 3,
                   'Convolution_num_output': 3,
                   'Convolution_pad': 1}
    layers.append(ConvolutionLayer(**conv_params))
    layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                    tops=['out']))
    layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
    layers.append(AccuracyLayer(name='accuracy',
                                bottoms=['out', 'annotations']))
    netspec.layers.extend(layers)
    net = netspec.instantiate()
    #######################################################################
    # test nesterov solver
    #######################################################################
    from barrista import solver as _solver
    # Lookup by name and by caffe solver type must yield the same class.
    tmp = _solver.Get_solver_class('nesterov')
    self.assertTrue(issubclass(tmp, _solver.NesterovSolver))
    tmp = _solver.Get_caffe_solver_class(_solver.SolverType.NESTEROV)
    self.assertTrue(issubclass(tmp, _solver.NesterovSolver))
    # Positional args are rejected; base_lr is mandatory.
    with self.assertRaises(TypeError):
        tmp(2)
    with self.assertRaises(Exception):
        tmp(iter_size=2)
    # Defaults of the generated parameter dict (momentum defaults to 0).
    tmp_instance = tmp(base_lr=2)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 1)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertEqual(solver_parameter_dict['momentum'], 0.0)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit iter_size override.
    params = {'net': net, 'base_lr': 2}
    if 'iter_size' in solver_parameter_dict.keys():
        params['iter_size'] = 2
    tmp_instance = tmp(**params)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertEqual(solver_parameter_dict['momentum'], 0.0)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit regularization override.
    params['regularization_type'] = 'L1'
    tmp_instance = tmp(**params)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L1')
    self.assertEqual(solver_parameter_dict['momentum'], 0.0)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit momentum override.
    params['momentum'] = 1.
    tmp_instance = tmp(**params)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L1')
    self.assertEqual(solver_parameter_dict['momentum'], 1.0)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Invalid regularization type must be rejected.
    if 'iter_size' in solver_parameter_dict.keys():
        params['iter_size'] = 3
    params['regularization_type'] = '--'
    del params['momentum']
    with self.assertRaises(AssertionError):
        _ = tmp(**params).Get_parameter_dict()
    # lr policies requiring extra parameters fail without them.
    del params['regularization_type']
    params['lr_policy'] = 'step'
    with self.assertRaises(AssertionError):
        _ = tmp(**params).Get_parameter_dict()
    params['lr_policy'] = 'xx'
    with self.assertRaises(AssertionError):
        _ = tmp(**params).Get_parameter_dict()
    params['lr_policy'] = 'exp'
    with self.assertRaises(AssertionError):
        _ = tmp(**params).Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                lr_policy='inv').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                lr_policy='multistep').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                lr_policy='poly').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,  # noqa
                base_lr=2,
                lr_policy='sigmoid').Get_parameter_dict()
    # Train on a trivial constant task; must reach perfect accuracy.
    solver = tmp(base_lr=0.01,
                 momentum=0.95)
    X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
         'annotations': np.ones((10, 1), dtype='float32')}
    net.fit(20,
            solver,
            X)
    accy = net.predict(X,
                       allow_train_phase_for_test=True)['accuracy'][0]
    self.assertEqual(accy, 1.0)
def test_rmsprop(self):
    """
    Test the RMSProp solver.

    Skipped when the installed caffe lacks ``SolverType.RMSPROP``.
    Checks that ``delta`` and ``rms_decay`` are mandatory, verifies the
    parameter dict for several constructions, rejects incomplete lr
    policies, and trains the toy net to accuracy 1.0.
    """
    import numpy as np
    import barrista.design as design
    from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                 SoftmaxWithLossLayer, AccuracyLayer)
    # Build a minimal network with an accuracy output for evaluation.
    netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                      inputs=['data', 'annotations'],
                                      phase=design.Phase.TRAIN)
    layers = []
    conv_params = {'Convolution_kernel_size': 3,
                   'Convolution_num_output': 3,
                   'Convolution_pad': 1}
    layers.append(ConvolutionLayer(**conv_params))
    layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                    tops=['out']))
    layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
    layers.append(AccuracyLayer(name='accuracy',
                                bottoms=['out', 'annotations']))
    netspec.layers.extend(layers)
    net = netspec.instantiate()
    #######################################################################
    # test rmsprop solver
    #######################################################################
    from barrista import solver as _solver
    # Older caffe versions do not ship RMSProp; skip silently there.
    if not hasattr(_solver.SolverType, 'RMSPROP'):
        return
    # Lookup by name and by caffe solver type must yield the same class.
    tmp = _solver.Get_solver_class('rmsprop')
    self.assertTrue(issubclass(tmp, _solver.RMSPropSolver))
    tmp = _solver.Get_caffe_solver_class(_solver.SolverType.RMSPROP)
    self.assertTrue(issubclass(tmp, _solver.RMSPropSolver))
    # Positional args are rejected; base_lr, delta and rms_decay are all
    # required for this solver.
    with self.assertRaises(TypeError):
        tmp(2)
    with self.assertRaises(Exception):
        tmp(iter_size=2)
    with self.assertRaises(Exception):
        tmp(base_lr=2)
    with self.assertRaises(Exception):
        tmp(base_lr=2,
            delta=0.1)
    # Defaults of the generated parameter dict.
    tmp_instance = tmp(base_lr=2,
                       delta=0.1,
                       rms_decay=0.9)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    self.assertEqual(solver_parameter_dict['iter_size'], 1)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertEqual(solver_parameter_dict['rms_decay'], 0.9)
    self.assertEqual(solver_parameter_dict['delta'], 0.1)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit iter_size override.
    tmp_instance = tmp(net=net,
                       base_lr=2,
                       delta=0.1,
                       rms_decay=0.9,
                       iter_size=2)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertEqual(solver_parameter_dict['rms_decay'], 0.9)
    self.assertEqual(solver_parameter_dict['delta'], 0.1)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit regularization override.
    tmp_instance = tmp(net=net,
                       base_lr=2,
                       delta=0.1,
                       rms_decay=0.9,
                       iter_size=2,
                       regularization_type='L1')
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L1')
    self.assertEqual(solver_parameter_dict['rms_decay'], 0.9)
    self.assertEqual(solver_parameter_dict['delta'], 0.1)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Invalid regularization type must be rejected.
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                delta=0.1,
                rms_decay=0.9,
                iter_size=3,
                regularization_type='--').Get_parameter_dict()
    # lr policies requiring extra parameters fail without them.
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                delta=0.1,
                rms_decay=0.9,
                lr_policy='step').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                delta=0.1,
                rms_decay=0.9,
                lr_policy='xx').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                delta=0.1,
                rms_decay=0.9,
                lr_policy='exp').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                delta=0.1,
                rms_decay=0.9,
                lr_policy='inv').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                delta=0.1,
                rms_decay=0.9,
                lr_policy='multistep').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,
                base_lr=2,
                delta=0.1,
                rms_decay=0.9,
                lr_policy='poly').Get_parameter_dict()
    with self.assertRaises(AssertionError):
        _ = tmp(net=net,  # noqa
                base_lr=2,
                delta=0.1,
                rms_decay=0.9,
                lr_policy='sigmoid').Get_parameter_dict()
    # Train on a trivial constant task; must reach perfect accuracy.
    solver = tmp(base_lr=2,
                 delta=0.1,
                 rms_decay=0.9)
    X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
         'annotations': np.ones((10, 1), dtype='float32')}
    net.fit(20,
            solver,
            X)
    accy = net.predict(X,
                       allow_train_phase_for_test=True)['accuracy'][0]
    self.assertEqual(accy, 1.0)
def test_adadelta(self):
    """
    Test the Adadelta solver.

    Skipped when the installed caffe lacks ``SolverType.ADADELTA``.
    Checks that ``momentum`` is required, verifies parameter-dict
    defaults (``delta`` defaults to 1E-8) and overrides, and trains the
    toy net to accuracy 1.0.
    """
    import numpy as np
    import barrista.design as design
    from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                 SoftmaxWithLossLayer, AccuracyLayer)
    # Build a minimal network with an accuracy output for evaluation.
    netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                      inputs=['data', 'annotations'],
                                      phase=design.Phase.TRAIN)
    layers = []
    conv_params = {'Convolution_kernel_size': 3,
                   'Convolution_num_output': 3,
                   'Convolution_pad': 1}
    layers.append(ConvolutionLayer(**conv_params))
    layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                    tops=['out']))
    layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
    layers.append(AccuracyLayer(name='accuracy',
                                bottoms=['out', 'annotations']))
    netspec.layers.extend(layers)
    net = netspec.instantiate()
    #######################################################################
    # test AdaDelta solver
    #######################################################################
    from barrista import solver as _solver
    # Older caffe versions do not ship AdaDelta; skip silently there.
    if not hasattr(_solver.SolverType, 'ADADELTA'):
        return
    # Lookup by name and by caffe solver type must yield the same class.
    tmp = _solver.Get_solver_class('adadelta')
    self.assertTrue(issubclass(tmp, _solver.AdaDeltaSolver))
    tmp = _solver.Get_caffe_solver_class(_solver.SolverType.ADADELTA)
    self.assertTrue(issubclass(tmp, _solver.AdaDeltaSolver))
    # Positional args are rejected; base_lr and momentum are required.
    with self.assertRaises(TypeError):
        tmp(2)
    with self.assertRaises(Exception):
        tmp(iter_size=2)
    with self.assertRaises(Exception):
        tmp(base_lr=2)
    with self.assertRaises(Exception):
        tmp(base_lr=2,
            delta=0.1)
    # Defaults of the generated parameter dict (delta defaults to 1E-8).
    tmp_instance = tmp(base_lr=2,
                       momentum=0.9)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 1)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertEqual(solver_parameter_dict['momentum'], 0.9)
    self.assertEqual(solver_parameter_dict['delta'], 1E-8)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit delta / iter_size overrides.
    params = {'net': net, 'base_lr': 2, 'momentum': 0.9, 'delta': 0.1}
    if 'iter_size' in solver_parameter_dict.keys():
        params['iter_size'] = 2
    tmp_instance = tmp(**params)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertEqual(solver_parameter_dict['momentum'], 0.9)
    self.assertEqual(solver_parameter_dict['delta'], 0.1)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Train on a trivial constant task; must reach perfect accuracy.
    solver = tmp(base_lr=0.001,
                 momentum=0.9)
    X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
         'annotations': np.ones((10, 1), dtype='float32')}
    net.fit(20,
            solver,
            X)
    accy = net.predict(X,
                       allow_train_phase_for_test=True)['accuracy'][0]
    self.assertEqual(accy, 1.0)
def test_adagrad(self):
    """
    Test the AdaGrad solver.

    Skipped when the installed caffe lacks ``SolverType.ADAGRAD``.
    Checks that ``delta`` is required, verifies the parameter dict for
    default and overridden constructions, and trains the toy net to
    accuracy 1.0.
    """
    import numpy as np
    import barrista.design as design
    from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                 SoftmaxWithLossLayer, AccuracyLayer)
    # Build a minimal network with an accuracy output for evaluation.
    netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                      inputs=['data', 'annotations'],
                                      phase=design.Phase.TRAIN)
    layers = []
    conv_params = {'Convolution_kernel_size': 3,
                   'Convolution_num_output': 3,
                   'Convolution_pad': 1}
    layers.append(ConvolutionLayer(**conv_params))
    layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                    tops=['out']))
    layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
    layers.append(AccuracyLayer(name='accuracy',
                                bottoms=['out', 'annotations']))
    netspec.layers.extend(layers)
    net = netspec.instantiate()
    #######################################################################
    # test AdaGrad solver
    #######################################################################
    from barrista import solver as _solver
    # Older caffe versions do not ship AdaGrad; skip silently there.
    if not hasattr(_solver.SolverType, 'ADAGRAD'):
        return
    # Lookup by name and by caffe solver type must yield the same class.
    tmp = _solver.Get_solver_class('adagrad')
    self.assertTrue(issubclass(tmp, _solver.AdagradSolver))
    tmp = _solver.Get_caffe_solver_class(_solver.SolverType.ADAGRAD)
    self.assertTrue(issubclass(tmp, _solver.AdagradSolver))
    # Positional args are rejected; base_lr and delta are required.
    with self.assertRaises(TypeError):
        tmp(2)
    with self.assertRaises(Exception):
        tmp(iter_size=2)
    with self.assertRaises(Exception):
        tmp(base_lr=2)
    # Defaults of the generated parameter dict.
    tmp_instance = tmp(base_lr=2,
                       delta=0.1)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 1)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertEqual(solver_parameter_dict['delta'], 0.1)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit iter_size override.
    params = {'net': net, 'base_lr': 2, 'delta': 0.1}
    if 'iter_size' in solver_parameter_dict.keys():
        params['iter_size'] = 2
    tmp_instance = tmp(**params)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertEqual(solver_parameter_dict['delta'], 0.1)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Train on a trivial constant task; must reach perfect accuracy.
    solver = tmp(base_lr=0.001,
                 delta=0.1)
    X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
         'annotations': np.ones((10, 1), dtype='float32')}
    net.fit(20,
            solver,
            X)
    accy = net.predict(X,
                       allow_train_phase_for_test=True)['accuracy'][0]
    self.assertEqual(accy, 1.0)
def test_adam(self):
    """
    Test the ADAM solver.

    Skipped when the installed caffe lacks ``SolverType.ADAM``.
    Verifies parameter-dict defaults (``momentum`` 0.9, ``momentum2``
    0.999, ``delta`` 1E-8), explicit overrides, and that the toy net
    trains to accuracy 1.0.
    """
    import numpy as np
    import barrista.design as design
    from barrista.design import (ConvolutionLayer, InnerProductLayer,
                                 SoftmaxWithLossLayer, AccuracyLayer)
    # Build a minimal network with an accuracy output for evaluation.
    netspec = design.NetSpecification([[10, 3, 3, 3], [10]],
                                      inputs=['data', 'annotations'],
                                      phase=design.Phase.TRAIN)
    layers = []
    conv_params = {'Convolution_kernel_size': 3,
                   'Convolution_num_output': 3,
                   'Convolution_pad': 1}
    layers.append(ConvolutionLayer(**conv_params))
    layers.append(InnerProductLayer(InnerProduct_num_output=2,
                                    tops=['out']))
    layers.append(SoftmaxWithLossLayer(bottoms=['out', 'annotations']))
    layers.append(AccuracyLayer(name='accuracy',
                                bottoms=['out', 'annotations']))
    netspec.layers.extend(layers)
    net = netspec.instantiate()
    #######################################################################
    # test adam solver
    #######################################################################
    from barrista import solver as _solver
    # Older caffe versions do not ship ADAM; skip silently there.
    if not hasattr(_solver.SolverType, 'ADAM'):
        return
    # Lookup by name and by caffe solver type must yield the same class.
    tmp = _solver.Get_solver_class('adam')
    self.assertTrue(issubclass(tmp, _solver.AdamSolver))
    tmp = _solver.Get_caffe_solver_class(_solver.SolverType.ADAM)
    self.assertTrue(issubclass(tmp, _solver.AdamSolver))
    # Positional args are rejected; base_lr is mandatory.
    with self.assertRaises(TypeError):
        tmp(2)
    with self.assertRaises(Exception):
        tmp(iter_size=2)
    # Defaults of the generated parameter dict.
    tmp_instance = tmp(base_lr=2)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 1)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertEqual(solver_parameter_dict['momentum'], 0.9)
    self.assertEqual(solver_parameter_dict['momentum2'], 0.999)
    self.assertEqual(solver_parameter_dict['delta'], 1E-8)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit delta / iter_size overrides.
    params = {'net': net, 'base_lr': 2, 'delta': 0.1}
    if 'iter_size' in solver_parameter_dict.keys():
        params['iter_size'] = 2
    tmp_instance = tmp(**params)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L2')
    self.assertEqual(solver_parameter_dict['momentum'], 0.9)
    self.assertEqual(solver_parameter_dict['momentum2'], 0.999)
    self.assertEqual(solver_parameter_dict['delta'], 0.1)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Explicit momentum2 and regularization overrides.
    params['momentum2'] = 1.
    params['regularization_type'] = 'L1'
    tmp_instance = tmp(**params)
    solver_parameter_dict = tmp_instance.Get_parameter_dict()
    self.assertEqual(solver_parameter_dict['base_lr'], 2)
    if 'iter_size' in solver_parameter_dict.keys():
        self.assertEqual(solver_parameter_dict['iter_size'], 2)
    self.assertEqual(solver_parameter_dict['lr_policy'], 'fixed')
    self.assertEqual(solver_parameter_dict['regularization_type'], 'L1')
    self.assertEqual(solver_parameter_dict['momentum'], 0.9)
    self.assertEqual(solver_parameter_dict['momentum2'], 1.0)
    self.assertEqual(solver_parameter_dict['delta'], 0.1)
    self.assertNotIn('weight_decay', list(solver_parameter_dict.keys()))
    self.assertNotIn('power', list(solver_parameter_dict.keys()))
    # Train on a trivial constant task; must reach perfect accuracy.
    solver = tmp(base_lr=0.001)
    X = {'data': np.zeros((10, 3, 3, 3), dtype='float32'),
         'annotations': np.ones((10, 1), dtype='float32')}
    net.fit(20,
            solver,
            X)
    accy = net.predict(X,
                       allow_train_phase_for_test=True)['accuracy'][0]
    self.assertEqual(accy, 1.0)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 44.147727
| 87
| 0.533536
| 10,980
| 108,780
| 5.103552
| 0.048543
| 0.04779
| 0.055945
| 0.049789
| 0.869658
| 0.844032
| 0.823509
| 0.794921
| 0.775434
| 0.759802
| 0
| 0.02953
| 0.335052
| 108,780
| 2,463
| 88
| 44.165652
| 0.745179
| 0.031908
| 0
| 0.799525
| 0
| 0
| 0.08142
| 0.014907
| 0
| 0
| 0
| 0
| 0.164846
| 1
| 0.022803
| false
| 0
| 0.093112
| 0
| 0.128266
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3b7291526c3b11a896bb04adc416aa8cfa75592f
| 36,661
|
py
|
Python
|
tccli/services/cat/cat_client.py
|
zqfan/tencentcloud-cli
|
b6ad9fced2a2b340087e4e5522121d405f68b615
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/cat/cat_client.py
|
zqfan/tencentcloud-cli
|
b6ad9fced2a2b340087e4e5522121d405f68b615
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/cat/cat_client.py
|
zqfan/tencentcloud-cli
|
b6ad9fced2a2b340087e4e5522121d405f68b615
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.cat.v20180409 import cat_client as cat_client_v20180409
from tencentcloud.cat.v20180409 import models as models_v20180409
def doGetRespTimeTrendEx(args, parsed_globals):
    """Invoke the cat ``GetRespTimeTrendEx`` API and print the response.

    :param args: dict of action-specific parameters; serialized as JSON
        into the request model.
    :param parsed_globals: parsed global CLI options (credentials, region,
        endpoint, version, output format, filter, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default to a 60s request timeout when none is given on the CLI.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Resolve the client/model modules for the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetRespTimeTrendExRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetRespTimeTrendEx(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # json.loads only accepts bytes from Python 3.6 on; decode first.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyAgentGroup(args, parsed_globals):
    """Invoke the cat ``ModifyAgentGroup`` API and print the response.

    :param args: dict of action-specific parameters; serialized as JSON
        into the request model.
    :param parsed_globals: parsed global CLI options (credentials, region,
        endpoint, version, output format, filter, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default to a 60s request timeout when none is given on the CLI.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Resolve the client/model modules for the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyAgentGroupRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ModifyAgentGroup(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # json.loads only accepts bytes from Python 3.6 on; decode first.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTasksByType(args, parsed_globals):
    """Invoke the cat ``DescribeTasksByType`` API and print the response.

    :param args: dict of action-specific parameters; serialized as JSON
        into the request model.
    :param parsed_globals: parsed global CLI options (credentials, region,
        endpoint, version, output format, filter, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default to a 60s request timeout when none is given on the CLI.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Resolve the client/model modules for the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeTasksByTypeRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeTasksByType(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # json.loads only accepts bytes from Python 3.6 on; decode first.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateTaskEx(args, parsed_globals):
    """Invoke the cat ``CreateTaskEx`` API and print the response.

    :param args: dict of action-specific parameters; serialized as JSON
        into the request model.
    :param parsed_globals: parsed global CLI options (credentials, region,
        endpoint, version, output format, filter, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default to a 60s request timeout when none is given on the CLI.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Resolve the client/model modules for the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateTaskExRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateTaskEx(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # json.loads only accepts bytes from Python 3.6 on; decode first.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doPauseTask(args, parsed_globals):
    """Invoke the cat ``PauseTask`` API and print the response.

    :param args: dict of action-specific parameters; serialized as JSON
        into the request model.
    :param parsed_globals: parsed global CLI options (credentials, region,
        endpoint, version, output format, filter, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default to a 60s request timeout when none is given on the CLI.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Resolve the client/model modules for the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.PauseTaskRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.PauseTask(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # json.loads only accepts bytes from Python 3.6 on; decode first.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAgentGroups(args, parsed_globals):
    """Invoke the cat ``DescribeAgentGroups`` API and print the response.

    :param args: dict of action-specific parameters; serialized as JSON
        into the request model.
    :param parsed_globals: parsed global CLI options (credentials, region,
        endpoint, version, output format, filter, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default to a 60s request timeout when none is given on the CLI.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Resolve the client/model modules for the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeAgentGroupsRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeAgentGroups(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # json.loads only accepts bytes from Python 3.6 on; decode first.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGetRealAvailRatio(args, parsed_globals):
    """Invoke the cat GetRealAvailRatio API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetRealAvailRatioRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetRealAvailRatio(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateAgentGroup(args, parsed_globals):
    """Invoke the cat CreateAgentGroup API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateAgentGroupRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateAgentGroup(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTaskDetail(args, parsed_globals):
    """Invoke the cat DescribeTaskDetail API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeTaskDetailRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeTaskDetail(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAlarmTopic(args, parsed_globals):
    """Invoke the cat DescribeAlarmTopic API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeAlarmTopicRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeAlarmTopic(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAlarms(args, parsed_globals):
    """Invoke the cat DescribeAlarms API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeAlarmsRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeAlarms(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGetReturnCodeHistory(args, parsed_globals):
    """Invoke the cat GetReturnCodeHistory API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetReturnCodeHistoryRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetReturnCodeHistory(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteTasks(args, parsed_globals):
    """Invoke the cat DeleteTasks API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteTasksRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DeleteTasks(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGetDailyAvailRatio(args, parsed_globals):
    """Invoke the cat GetDailyAvailRatio API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetDailyAvailRatioRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetDailyAvailRatio(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyTaskEx(args, parsed_globals):
    """Invoke the cat ModifyTaskEx API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyTaskExRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ModifyTaskEx(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAgents(args, parsed_globals):
    """Invoke the cat DescribeAgents API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeAgentsRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeAgents(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUserLimit(args, parsed_globals):
    """Invoke the cat DescribeUserLimit API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeUserLimitRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeUserLimit(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCatLogs(args, parsed_globals):
    """Invoke the cat DescribeCatLogs API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeCatLogsRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeCatLogs(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doVerifyResult(args, parsed_globals):
    """Invoke the cat VerifyResult API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.VerifyResultRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.VerifyResult(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBindAlarmPolicy(args, parsed_globals):
    """Invoke the cat BindAlarmPolicy API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.BindAlarmPolicyRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.BindAlarmPolicy(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGetResultSummary(args, parsed_globals):
    """Invoke the cat GetResultSummary API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetResultSummaryRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetResultSummary(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteAgentGroup(args, parsed_globals):
    """Invoke the cat DeleteAgentGroup API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteAgentGroupRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DeleteAgentGroup(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGetAvailRatioHistory(args, parsed_globals):
    """Invoke the cat GetAvailRatioHistory API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetAvailRatioHistoryRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetAvailRatioHistory(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRunTask(args, parsed_globals):
    """Invoke the cat RunTask API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.RunTaskRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.RunTask(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGetReturnCodeInfo(args, parsed_globals):
    """Invoke the cat GetReturnCodeInfo API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetReturnCodeInfoRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetReturnCodeInfo(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAlarmsByTask(args, parsed_globals):
    """Invoke the cat DescribeAlarmsByTask API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeAlarmsByTaskRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeAlarmsByTask(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGetTaskTotalNumber(args, parsed_globals):
    """Invoke the cat GetTaskTotalNumber API with *args* and print the formatted JSON response."""
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # Default request timeout is 60s unless overridden on the command line.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.CatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetTaskTotalNumberRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetTaskTotalNumber(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # result may be bytes on python 3.3; decode then parse
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
# Maps supported service API version -> generated client module.
CLIENT_MAP = {
    "v20180409": cat_client_v20180409,
}

# Maps supported service API version -> generated request/response model module.
MODELS_MAP = {
    "v20180409": models_v20180409,
}

# Maps CLI action name -> handler function defined above; consumed by the
# dispatcher via action_caller().
ACTION_MAP = {
    "GetRespTimeTrendEx": doGetRespTimeTrendEx,
    "ModifyAgentGroup": doModifyAgentGroup,
    "DescribeTasksByType": doDescribeTasksByType,
    "CreateTaskEx": doCreateTaskEx,
    "PauseTask": doPauseTask,
    "DescribeAgentGroups": doDescribeAgentGroups,
    "GetRealAvailRatio": doGetRealAvailRatio,
    "CreateAgentGroup": doCreateAgentGroup,
    "DescribeTaskDetail": doDescribeTaskDetail,
    "DescribeAlarmTopic": doDescribeAlarmTopic,
    "DescribeAlarms": doDescribeAlarms,
    "GetReturnCodeHistory": doGetReturnCodeHistory,
    "DeleteTasks": doDeleteTasks,
    "GetDailyAvailRatio": doGetDailyAvailRatio,
    "ModifyTaskEx": doModifyTaskEx,
    "DescribeAgents": doDescribeAgents,
    "DescribeUserLimit": doDescribeUserLimit,
    "DescribeCatLogs": doDescribeCatLogs,
    "VerifyResult": doVerifyResult,
    "BindAlarmPolicy": doBindAlarmPolicy,
    "GetResultSummary": doGetResultSummary,
    "DeleteAgentGroup": doDeleteAgentGroup,
    "GetAvailRatioHistory": doGetAvailRatioHistory,
    "RunTask": doRunTask,
    "GetReturnCodeInfo": doGetReturnCodeInfo,
    "DescribeAlarmsByTask": doDescribeAlarmsByTask,
    "GetTaskTotalNumber": doGetTaskTotalNumber,
}

# Service API versions this CLI build supports; validated in parse_global_arg.
AVAILABLE_VERSION_LIST = [
    "v20180409",
]
def action_caller():
    """Return the action-name -> handler mapping used by the CLI dispatcher."""
    return ACTION_MAP
def parse_global_arg(parsed_globals):
    """Resolve global CLI options against profile files and environment variables.

    Fills any option left unset on the command line from, in order, the
    selected profile's ``.credential``/``.configure`` files under ``~/.tccli``
    and (only when no profile was explicitly chosen) environment variables,
    then validates the requested service version and endpoint.

    :param parsed_globals: dict of global options parsed from the command line;
        mutated in place and returned.
    :raises ConfigurationError: when a required option cannot be resolved or
        the configuration files are malformed.
    """
    g_param = parsed_globals
    is_exist_profile = True
    if not parsed_globals["profile"]:
        is_exist_profile = False
        g_param["profile"] = "default"
    configure_path = os.path.join(os.path.expanduser("~"), ".tccli")
    is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure")
    is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential")
    conf = {}
    cred = {}
    if is_conf_exist:
        conf = Utils.load_json_msg(conf_path)
    if is_cred_exist:
        cred = Utils.load_json_msg(cred_path)
    if not (isinstance(conf, dict) and isinstance(cred, dict)):
        raise ConfigurationError(
            "file: %s or %s is not json format"
            % (g_param["profile"] + ".configure", g_param["profile"] + ".credential"))
    if OptionsDefine.Token not in cred:
        cred[OptionsDefine.Token] = None
    # Environment variables only apply when no profile was explicitly chosen.
    if not is_exist_profile:
        if os.environ.get(OptionsDefine.ENV_SECRET_ID) and os.environ.get(OptionsDefine.ENV_SECRET_KEY):
            cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
            cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
            cred[OptionsDefine.Token] = os.environ.get(OptionsDefine.ENV_TOKEN)
        if os.environ.get(OptionsDefine.ENV_REGION):
            conf[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)
    # Fill options still unset on the command line from the profile files.
    for param in g_param.keys():
        if g_param[param] is None:
            if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId, OptionsDefine.Token]:
                if param in cred:
                    g_param[param] = cred[param]
                else:
                    raise ConfigurationError("%s is invalid" % param)
            elif param in [OptionsDefine.Region, OptionsDefine.Output]:
                if param in conf:
                    g_param[param] = conf[param]
                else:
                    raise ConfigurationError("%s is invalid" % param)
    try:
        # "2018-04-09" style versions are normalized to the "v20180409" keys
        # used by CLIENT_MAP / MODELS_MAP.
        if g_param[OptionsDefine.ServiceVersion]:
            g_param[OptionsDefine.Version] = "v" + g_param[OptionsDefine.ServiceVersion].replace('-', '')
        else:
            version = conf["cat"][OptionsDefine.Version]
            g_param[OptionsDefine.Version] = "v" + version.replace('-', '')
        if g_param[OptionsDefine.Endpoint] is None:
            g_param[OptionsDefine.Endpoint] = conf["cat"][OptionsDefine.Endpoint]
    except Exception as err:
        raise ConfigurationError("config file:%s error, %s" % (conf_path, str(err)))
    if g_param[OptionsDefine.Version] not in AVAILABLE_VERSION_LIST:
        # ConfigurationError for consistency with every other validation
        # failure in this function (was a bare Exception).
        raise ConfigurationError("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
    return g_param
| 42.480881
| 105
| 0.723057
| 4,209
| 36,661
| 6.058921
| 0.048468
| 0.080464
| 0.226492
| 0.058113
| 0.837111
| 0.83017
| 0.827347
| 0.824524
| 0.820838
| 0.769587
| 0
| 0.009478
| 0.168271
| 36,661
| 862
| 106
| 42.530162
| 0.826873
| 0.007938
| 0
| 0.689243
| 0
| 0
| 0.040589
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038513
| false
| 0
| 0.015936
| 0.001328
| 0.057105
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8eeefbedf5ad4d385fae8b999b5f2617ec2de912
| 55
|
py
|
Python
|
tinder/test/test_basic.py
|
elbaro/tinder
|
68f8d61ccfddade3276e8081820cb32269af572f
|
[
"MIT"
] | 6
|
2018-01-08T11:23:14.000Z
|
2019-05-15T11:42:34.000Z
|
tinder/test/test_basic.py
|
elbaro/tinder
|
68f8d61ccfddade3276e8081820cb32269af572f
|
[
"MIT"
] | 2
|
2018-07-11T06:17:19.000Z
|
2018-07-24T11:54:13.000Z
|
tinder/test/test_basic.py
|
elbaro/tinder
|
68f8d61ccfddade3276e8081820cb32269af572f
|
[
"MIT"
] | null | null | null |
import torch
import tinder
def test_pass():
    """Smoke test: the module imported and pytest can collect a test here."""
| 7.857143
| 16
| 0.709091
| 8
| 55
| 4.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236364
| 55
| 6
| 17
| 9.166667
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.5
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
d92b1e94679a4ec173fa51c311b19e27ae13922f
| 21,086
|
py
|
Python
|
tests/functional/types/test_enum.py
|
remorses/tartiflette-whl
|
92bed13de130a7a88278d7019314135e01281259
|
[
"MIT"
] | 530
|
2019-06-04T11:45:36.000Z
|
2022-03-31T09:29:56.000Z
|
tests/functional/types/test_enum.py
|
remorses/tartiflette-whl
|
92bed13de130a7a88278d7019314135e01281259
|
[
"MIT"
] | 242
|
2019-06-04T11:53:08.000Z
|
2022-03-28T07:06:27.000Z
|
tests/functional/types/test_enum.py
|
remorses/tartiflette-whl
|
92bed13de130a7a88278d7019314135e01281259
|
[
"MIT"
] | 36
|
2019-06-21T06:40:27.000Z
|
2021-11-04T13:11:16.000Z
|
import pytest
from tartiflette import Resolver, create_engine
@pytest.mark.asyncio
async def test_tartiflette_execute_enum_type_output():
    # Verifies that a resolver returning a declared enum member name is
    # serialized unchanged in the response payload.
    schema_sdl = """
    enum Test {
        Value1
        Value2
        Value3
    }
    type Query {
        enumTest: Test
    }
    """

    # Per-test schema_name keeps this engine's registry isolated from the
    # other tests in this module.
    @Resolver(
        "Query.enumTest",
        schema_name="test_tartiflette_execute_enum_type_output",
    )
    async def func_field_resolver(*args, **kwargs):
        # Return a valid member of the Test enum.
        return "Value1"

    ttftt = await create_engine(
        schema_sdl, schema_name="test_tartiflette_execute_enum_type_output"
    )

    result = await ttftt.execute(
        """
        query Test{
            enumTest
        }
        """,
        operation_name="Test",
    )

    assert {"data": {"enumTest": "Value1"}} == result
@pytest.mark.asyncio
@pytest.mark.parametrize(
    # Each case is (input_sdl, resolver_response, expected):
    #   input_sdl         -- SDL type of Query.testField: MyEnum under every
    #                        nullability / list-wrapping combination.
    #   resolver_response -- raw value the field resolver returns.
    #   expected          -- full GraphQL response dict ("data" plus "errors"
    #                        when coercion fails).
    # The error messages and locations are exact-match contract strings.
    "input_sdl,resolver_response,expected",
    [
        ("MyEnum", None, {"data": {"testField": None}}),
        (
            "MyEnum",
            "UNKNOWN_VALUE",
            {
                "data": {"testField": None},
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        ("MyEnum", "ENUM_1", {"data": {"testField": "ENUM_1"}}),
        (
            "MyEnum!",
            None,
            {
                "data": None,
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "MyEnum!",
            "UNKNOWN_VALUE",
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        ("MyEnum!", "ENUM_1", {"data": {"testField": "ENUM_1"}}),
        ("[MyEnum]", None, {"data": {"testField": None}}),
        ("[MyEnum]", [None], {"data": {"testField": [None]}}),
        (
            "[MyEnum]",
            "UNKNOWN_VALUE",
            {
                "data": {"testField": None},
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum]",
            ["UNKNOWN_VALUE"],
            {
                "data": {"testField": [None]},
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum]",
            "ENUM_1",
            {
                "data": {"testField": None},
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        ("[MyEnum]", ["ENUM_1"], {"data": {"testField": ["ENUM_1"]}}),
        (
            "[MyEnum]",
            ["ENUM_1", None],
            {"data": {"testField": ["ENUM_1", None]}},
        ),
        (
            "[MyEnum]",
            ["ENUM_1", "UNKNOWN_VALUE"],
            {
                "data": {"testField": ["ENUM_1", None]},
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField", 1],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum]",
            ["ENUM_1", "ENUM_2"],
            {"data": {"testField": ["ENUM_1", "ENUM_2"]}},
        ),
        (
            "[MyEnum]!",
            None,
            {
                "data": None,
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        ("[MyEnum]!", [None], {"data": {"testField": [None]}}),
        (
            "[MyEnum]!",
            "UNKNOWN_VALUE",
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum]!",
            ["UNKNOWN_VALUE"],
            {
                "data": {"testField": [None]},
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum]!",
            "ENUM_1",
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        ("[MyEnum]!", ["ENUM_1"], {"data": {"testField": ["ENUM_1"]}}),
        (
            "[MyEnum]!",
            ["ENUM_1", None],
            {"data": {"testField": ["ENUM_1", None]}},
        ),
        (
            "[MyEnum]!",
            ["ENUM_1", "UNKNOWN_VALUE"],
            {
                "data": {"testField": ["ENUM_1", None]},
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField", 1],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum]!",
            ["ENUM_1", "ENUM_2"],
            {"data": {"testField": ["ENUM_1", "ENUM_2"]}},
        ),
        ("[MyEnum!]", None, {"data": {"testField": None}}),
        (
            "[MyEnum!]",
            [None],
            {
                "data": {"testField": None},
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]",
            "UNKNOWN_VALUE",
            {
                "data": {"testField": None},
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]",
            ["UNKNOWN_VALUE"],
            {
                "data": {"testField": None},
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]",
            "ENUM_1",
            {
                "data": {"testField": None},
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        ("[MyEnum!]", ["ENUM_1"], {"data": {"testField": ["ENUM_1"]}}),
        (
            "[MyEnum!]",
            ["ENUM_1", None],
            {
                "data": {"testField": None},
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField", 1],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]",
            ["ENUM_1", "UNKNOWN_VALUE"],
            {
                "data": {"testField": None},
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField", 1],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]",
            ["ENUM_1", "ENUM_2"],
            {"data": {"testField": ["ENUM_1", "ENUM_2"]}},
        ),
        (
            "[MyEnum!]!",
            None,
            {
                "data": None,
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]!",
            [None],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]!",
            "UNKNOWN_VALUE",
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]!",
            ["UNKNOWN_VALUE"],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]!",
            "ENUM_1",
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        ("[MyEnum!]!", ["ENUM_1"], {"data": {"testField": ["ENUM_1"]}}),
        (
            "[MyEnum!]!",
            ["ENUM_1", None],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField", 1],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]!",
            ["ENUM_1", "UNKNOWN_VALUE"],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField", 1],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[MyEnum!]!",
            ["ENUM_1", "ENUM_2"],
            {"data": {"testField": ["ENUM_1", "ENUM_2"]}},
        ),
        (
            "[[MyEnum!]!]!",
            None,
            {
                "data": None,
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            [None],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            "UNKNOWN_VALUE",
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            ["UNKNOWN_VALUE"],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            "ENUM_1",
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField"],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            ["ENUM_1"],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            ["ENUM_1", None],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    },
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField", 1],
                        "locations": [{"line": 3, "column": 13}],
                    },
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            ["ENUM_1", "UNKNOWN_VALUE"],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    },
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField", 1],
                        "locations": [{"line": 3, "column": 13}],
                    },
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            ["ENUM_1", "ENUM_2"],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField", 0],
                        "locations": [{"line": 3, "column": 13}],
                    },
                    {
                        "message": "Expected Iterable, but did not find one for field Query.testField.",
                        "path": ["testField", 1],
                        "locations": [{"line": 3, "column": 13}],
                    },
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            [[None]],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField", 0, 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            [["UNKNOWN_VALUE"]],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField", 0, 0],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        ("[[MyEnum!]!]!", [["ENUM_1"]], {"data": {"testField": [["ENUM_1"]]}}),
        (
            "[[MyEnum!]!]!",
            [["ENUM_1", None]],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Cannot return null for non-nullable field Query.testField.",
                        "path": ["testField", 0, 1],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            [["ENUM_1", "UNKNOWN_VALUE"]],
            {
                "data": None,
                "errors": [
                    {
                        "message": "Expected value of type MyEnum but received <class 'str'>.",
                        "path": ["testField", 0, 1],
                        "locations": [{"line": 3, "column": 13}],
                    }
                ],
            },
        ),
        (
            "[[MyEnum!]!]!",
            [["ENUM_1", "ENUM_2"]],
            {"data": {"testField": [["ENUM_1", "ENUM_2"]]}},
        ),
    ],
)
async def test_tartiflette_execute_enum_type_advanced(
    input_sdl, resolver_response, expected, random_schema_name
):
    """Check enum output coercion for every nullability/list wrapping of MyEnum.

    Builds a one-field schema whose Query.testField has type ``input_sdl``,
    resolves it with ``resolver_response``, and asserts the engine produces
    exactly ``expected`` (coerced data plus any coercion errors).
    """
    # Double braces survive str.format(); only the bare {} is replaced by the
    # parametrized field type.
    schema_sdl = """
    enum MyEnum {{ ENUM_1, ENUM_2 }}

    type Query {{
        testField: {}
    }}
    """.format(
        input_sdl
    )

    # random_schema_name (pytest fixture) isolates each case's engine so the
    # parametrized cases cannot clash on a shared schema registry name.
    @Resolver("Query.testField", schema_name=random_schema_name)
    async def func_field_resolver(*args, **kwargs):
        return resolver_response

    ttftt = await create_engine(schema_sdl, schema_name=random_schema_name)

    result = await ttftt.execute(
        """
        query Test{
        testField
        }
        """,
        operation_name="Test",
    )

    assert expected == result
| 31.377976
| 104
| 0.317082
| 1,324
| 21,086
| 4.953172
| 0.055136
| 0.036597
| 0.087527
| 0.125038
| 0.939158
| 0.920555
| 0.909881
| 0.898292
| 0.860171
| 0.858188
| 0
| 0.021754
| 0.526937
| 21,086
| 671
| 105
| 31.424739
| 0.635689
| 0
| 0
| 0.589506
| 0
| 0
| 0.304545
| 0.005627
| 0
| 0
| 0
| 0
| 0.003086
| 1
| 0
| false
| 0
| 0.003086
| 0
| 0.006173
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d93d5ea38d2539b722d2a05ec8acf3799e45df08
| 37,961
|
py
|
Python
|
runescape/goldBot/drone.py
|
yida-li/Gaming-Bots
|
69d3bba6fa5cee1b2ee569f6f6d73b845ecca7b5
|
[
"MIT"
] | null | null | null |
runescape/goldBot/drone.py
|
yida-li/Gaming-Bots
|
69d3bba6fa5cee1b2ee569f6f6d73b845ecca7b5
|
[
"MIT"
] | 2
|
2020-10-25T14:27:12.000Z
|
2020-11-13T16:15:49.000Z
|
runescape/goldBot/drone.py
|
yida-li/Gaming-Bots
|
69d3bba6fa5cee1b2ee569f6f6d73b845ecca7b5
|
[
"MIT"
] | null | null | null |
import pyautogui
import random
import time
import keyboard
# Disable PyAutoGUI's fail-safe (normally moving the mouse to a screen corner
# aborts the script) so the bot cannot be interrupted that way.
pyautogui.FAILSAFE = False
# Wall-clock start time; the cleaning loops print elapsed seconds against it.
start = time.time()
def closeBank(x, y):
    """Click the bank-window close button, offset by screen origin (x, y).

    Clicks once after moving, then 1-2 extra times to make the input pattern
    look less mechanical.
    """
    arbitraryMovement(
        random.randint(551, 556) + x,
        random.randint(111, 125) + y,
        random.uniform(0.78, 1.12),
    )
    pyautogui.click()
    for _ in range(random.randint(1, 2)):
        pyautogui.click()
        time.sleep(0.2)
def login(i, j):
    """Walk through the game's login screens, offsetting clicks by (i, j).

    Relies on screenshot matching against the authenticatorRecognition/ images;
    every step sleeps a randomized interval to mimic human pacing.
    NOTE(review): the hard-coded 'password' literal is typed into the password
    field — presumably a placeholder; confirm before use.
    """
    # "Existing user" / login button screen.
    check = pyautogui.locateOnScreen('authenticatorRecognition/login.png')
    time.sleep(random.uniform(3.3, 3.780))
    if check:
        pyautogui.leftClick(x=449 + i, y=319 + j, duration=1)
    # Username/password entry screen.
    check2 = pyautogui.locateOnScreen('authenticatorRecognition/enterusername.png')
    time.sleep(random.uniform(3.3, 3.780))
    if check2:
        pyautogui.leftClick(x=357 + i, y=291 + j, duration=1)
        pyautogui.typewrite('password', interval=1)
        pyautogui.leftClick(x=312 + i, y=349 + j, duration=1)
    # Park the cursor somewhere arbitrary, then click through the
    # "click here to play" screen.
    pyautogui.moveTo(random.randint(0, 700), random.randint(0, 200))
    time.sleep(random.uniform(5.3, 6.780))
    pyautogui.leftClick(x=371 + i, y=348 + j, duration=2)
    check4 = pyautogui.locateOnScreen('authenticatorRecognition/clickhere.png')
    time.sleep(random.uniform(3.3, 3.780))
    if not check4:
        # assumes the camera needs resetting when the prompt is absent —
        # TODO confirm against the actual client state.
        pyautogui.keyDown('down')
        time.sleep(random.uniform(1.3, 2.380))
        pyautogui.keyUp('down')
        pyautogui.moveTo(x=51 + i, y=142 + j, duration=1)
        pyautogui.dragTo(x=227 + i, y=147 + j, duration=2, button='right')
def logout(i, j):
    """Close the bank and click through the logout UI, offsetting by (i, j).

    Bug fixed: the original called ``closeBank()`` with no arguments although
    ``closeBank(x, y)`` requires two offsets, so every call raised TypeError
    before any logout click happened. The (i, j) offsets are now forwarded,
    consistent with every other coordinate in this function.
    """
    closeBank(i, j)
    time.sleep(random.uniform(1.3, 2.380))
    # Logout tab button.
    arbitraryMovement(
        random.randint(640, 647) + i,
        random.randint(502, 517) + j,
        random.uniform(0.5, 1.5),
    )
    time.sleep(random.uniform(1.3, 2.380))
    pyautogui.leftClick()
    time.sleep(random.uniform(1.3, 2.380))
    # "Click here to logout" button.
    arbitraryMovement(
        random.randint(607, 689) + i,
        random.randint(453, 463) + j,
        random.uniform(0.5, 1.5),
    )
    pyautogui.leftClick()
# VPN located in the middle of screen
def checkVPN():
    """If the Forti VPN prompt is visible, type the password and connect.

    NOTE(review): coordinates are absolute (no offset parameters) and the
    'password' literal is presumably a placeholder — confirm before use.
    """
    prompt = pyautogui.locateOnScreen('authenticatorRecognition/forti.png')
    if not prompt:
        return
    pyautogui.leftClick(x=1434, y=525, duration=2)
    pyautogui.typewrite('password', interval=1)
    pyautogui.leftClick(x=1490, y=587, duration=1)
def normalDeposit(Xi, Xj):
    """Deposit two randomly chosen inventory slots, then slot 0 if untouched.

    Xi/Xj are screen offsets added to every click. The inventory is modeled
    as a 4x7 grid of randomized click boxes; a slot is marked used by zeroing
    its coordinates.
    """
    # Column x-ranges (4 columns, 7 slots each) and row y-ranges (7 rows,
    # repeated for each column) — generated in the same random.randint call
    # order as the original hand-written literals.
    col_ranges = [(576, 586), (616, 630), (656, 670), (699, 716)]
    row_ranges = [(247, 258), (283, 296), (321, 332), (358, 371),
                  (389, 406), (431, 444), (463, 479)]
    xs = [random.randint(lo, hi) for lo, hi in col_ranges for _ in range(7)]
    ys = [random.randint(lo, hi) for _ in range(4) for lo, hi in row_ranges]

    deposited = 0
    while deposited < 2:
        pick = random.randint(0, 27)
        if xs[pick] != 0 and ys[pick] != 0:
            arbitraryMovement(
                xs[pick] + Xi, ys[pick] + Xj, random.uniform(0.50, 0.150))
            pyautogui.click()
            time.sleep(random.uniform(0.50, 0.150))
            xs[pick] = 0
            ys[pick] = 0
            deposited += 1
    # Sweep slot 0 as well if the random picks missed it.
    if xs[0] != 0:
        arbitraryMovement(
            xs[0] + Xi, ys[0] + Xj, random.uniform(0.50, 0.150))
        pyautogui.click()
# decadence
def advancedDeposit1(Xi, Xj):
    """Deposit seven randomly chosen inventory slots, then sweep fixed slots.

    Xi/Xj are screen offsets added to every click position. The inventory is
    a 4x7 grid of randomized click boxes; used slots are zeroed out.

    Bug fixed: the final ``pyautogui.click`` was written without parentheses,
    so it was a no-op attribute reference and the last deposit click never
    happened; it is now an actual call.
    """
    # Same random.randint call order as the original hand-written literals:
    # all 28 x-values column by column, then all 28 y-values row-pattern by
    # column.
    col_ranges = [(576, 586), (616, 630), (656, 670), (699, 716)]
    row_ranges = [(247, 258), (283, 296), (321, 332), (358, 371),
                  (389, 406), (431, 444), (463, 479)]
    xs = [random.randint(lo, hi) for lo, hi in col_ranges for _ in range(7)]
    ys = [random.randint(lo, hi) for _ in range(4) for lo, hi in row_ranges]

    deposited = 0
    while deposited < 7:
        pick = random.randint(0, 27)
        if xs[pick] != 0 and ys[pick] != 0:
            arbitraryMovement(
                xs[pick] + Xi, ys[pick] + Xj, random.uniform(0.50, 0.150))
            pyautogui.click()
            time.sleep(random.uniform(0.50, 0.150))
            xs[pick] = 0
            ys[pick] = 0
            deposited += 1
    if xs[0] != 0:
        arbitraryMovement(
            xs[0] + Xi, ys[0] + Xj, random.uniform(0.50, 0.150))
        pyautogui.click()
        time.sleep(random.uniform(0.601, 0.799))
    if xs[17] != 0:
        # NOTE(review): checks slot 17 but clicks slot 10, exactly as the
        # original did — looks like a typo; confirm the intended slot before
        # changing behavior.
        arbitraryMovement(
            xs[10] + Xi, ys[10] + Xj, random.uniform(0.50, 0.150))
        pyautogui.click()
        time.sleep(random.uniform(0.601, 0.799))
    if xs[27] != 0:
        arbitraryMovement(
            xs[27] + Xi, ys[27] + Xj, random.uniform(0.50, 0.150))
        time.sleep(random.uniform(0.101, 0.199))
        pyautogui.click()  # was `pyautogui.click` (missing parentheses)
# arbitrarymoments
def arbitraryMovement(x, y, z):
    """Move the mouse to (x, y) over z seconds with a random easing curve.

    A random tween is picked from PyAutoGUI's easing functions so repeated
    movements do not share an identical velocity profile.
    """
    tweens = [
        pyautogui.easeInBack,
        pyautogui.easeInBounce,
        pyautogui.easeInCirc,
        pyautogui.easeInCubic,
        pyautogui.easeInElastic,
        pyautogui.easeInExpo,
        pyautogui.easeInOutBack,
        pyautogui.easeInExpo,  # index 7 repeats easeInExpo, as in the original
        pyautogui.easeInOutBounce,
        pyautogui.easeInOutCirc,
        pyautogui.easeInOutCubic,
        pyautogui.easeInOutElastic,
        pyautogui.easeInOutExpo,
    ]
    pyautogui.moveTo(x, y, z, tweens[random.randint(0, 12)])
# unpredictability
def sleepsegment():
    """Sleep a short random interval drawn from one of five randomly
    chosen ranges, to make delays between actions less uniform."""
    bounds = [
        (0.700, 0.800),
        (0.500, 0.900),
        (0.500, 0.600),
        (0.800, 0.900),
        (0.600, 0.800),
    ]
    lo, hi = bounds[random.randint(0, 4)]
    time.sleep(random.uniform(lo, hi))
def SoloClean(x):
    """Run x rounds of the single-window herb-cleaning loop.

    Each round: open the bank, withdraw, click 27 of the 28 inventory grid
    cells in random order, then deposit via bankAll and print elapsed time
    against the module-level ``start``.
    """
    col_ranges = [(576, 586), (616, 630), (656, 670), (699, 716)]
    row_ranges = [(247, 258), (283, 296), (321, 332), (358, 371),
                  (389, 406), (431, 444), (463, 479)]
    for _round in range(x):
        # Open the bank booth.
        arbitraryMovement(
            random.randint(425, 433), random.randint(195, 201),
            random.uniform(1.89, 2.11))
        pyautogui.click(button='left')
        sleepsegment()
        closeBank(0, 0)
        sleepsegment()
        # Randomized 4x7 inventory grid, same randint call order as the
        # original literals.
        xs = [random.randint(lo, hi) for lo, hi in col_ranges for _ in range(7)]
        ys = [random.randint(lo, hi) for _ in range(4) for lo, hi in row_ranges]
        clicked = 0
        while clicked < 27:  # deliberately stops one short of all 28 cells
            pick = random.randint(0, 27)
            if xs[pick] != 0 and ys[pick] != 0:
                pyautogui.click(button='left', x=xs[pick], y=ys[pick])
                xs[pick] = 0
                ys[pick] = 0
                clicked += 1
        time.sleep(random.uniform(2.4000, 2.500))
        # Re-open the bank deposit region and double-click.
        pyautogui.moveTo(
            random.randint(256, 353),
            random.randint(190, 243),
            random.uniform(0.5, 0.953),
        )
        pyautogui.click(button='left')
        time.sleep(random.uniform(0.100, 0.200))
        pyautogui.click(button='left')
        time.sleep(random.uniform(0.634, 0.780))
        bankAll(0, 0)
        # assumes the elapsed-time print belongs inside the round loop —
        # original indentation was lost; confirm if it should run once at end.
        print(time.time() - start)
def DuoClean(x):
    """Run x rounds of the herb-cleaning loop across two stacked client
    windows (the second window is offset 534px down).

    Per round: open both banks, build a randomized 4x7 click grid for each
    window, click 27 cells in each (same random order for both), then deposit
    in both via bankAll and print elapsed time against ``start``.
    """
    col_ranges = [(576, 586), (616, 630), (656, 670), (699, 716)]
    row_ranges = [(247, 258), (283, 296), (321, 332), (358, 371),
                  (389, 406), (431, 444), (463, 479)]
    for _round in range(x):
        # Open the first window's bank.
        arbitraryMovement(
            random.randint(425, 433), random.randint(195, 201),
            random.uniform(1.89, 2.11))
        pyautogui.click(button='left')
        sleepsegment()
        closeBank(0, 0)
        sleepsegment()
        # Grids built in the same randint call order as the original literals.
        xs = [random.randint(lo, hi) for lo, hi in col_ranges for _ in range(7)]
        ys = [random.randint(lo, hi) for _ in range(4) for lo, hi in row_ranges]
        # Open the second window's bank (534px lower).
        arbitraryMovement(
            random.randint(425, 433), random.randint(195, 201) + 534,
            random.uniform(1.89, 2.11))
        pyautogui.click(button='left')
        xs2 = [random.randint(lo, hi) for lo, hi in col_ranges for _ in range(7)]
        ys2 = [random.randint(lo, hi) + 534
               for _ in range(4) for lo, hi in row_ranges]
        sleepsegment()
        closeBank(0, 534)
        sleepsegment()
        clicked = 0
        while clicked < 27:  # stops one short of all 28 cells, as before
            pick = random.randint(0, 27)
            if xs[pick] != 0 and ys[pick] != 0:
                pyautogui.click(button='left', x=xs[pick], y=ys[pick])
                xs[pick] = 0
                ys[pick] = 0
                # Mirror the same cell in the second window.
                pyautogui.click(button='left', x=xs2[pick], y=ys2[pick])
                xs2[pick] = 0
                ys2[pick] = 0
                clicked += 1
        time.sleep(random.uniform(1.4000, 2.000))
        # Deposit region double-clicks; the same repeat count is used for
        # both windows, exactly as the original did.
        repeats = random.randint(1, 2)
        for _ in range(repeats):
            pyautogui.moveTo(
                random.randint(256, 353),
                random.randint(190, 243),
                random.uniform(0.5, 0.953),
            )
            pyautogui.click(button='left')
            time.sleep(random.uniform(0.100, 0.200))
            pyautogui.click(button='left')
        for _ in range(repeats):
            pyautogui.moveTo(
                random.randint(256, 353),
                random.randint(190, 243) + 534,
                random.uniform(0.5, 0.953),
            )
            pyautogui.click(button='left')
            time.sleep(random.uniform(0.100, 0.200))
            pyautogui.click(button='left')
        time.sleep(random.uniform(0.634, 0.780))
        bankAll(0, 0)
        bankAll(0, 534)
        # assumes the elapsed-time print belongs inside the round loop —
        # original indentation was lost; confirm.
        print(time.time() - start)
def SoloMix(x):
    """Run x rounds of the two-ingredient mixing loop in one window.

    Each round withdraws two ingredients (in one of two randomized click
    orders), combines one slot from each half of the inventory, waits for the
    mix animation, then banks the result.
    """
    cols = [(576, 586), (616, 630), (656, 670), (699, 716)]
    rows = [(247, 258), (283, 296), (321, 332), (358, 371),
            (389, 406), (431, 444), (463, 479)]
    for _round in range(x):
        # The 28-slot inventory walked row by row splits into two 14-slot
        # halves, one per ingredient. Modular/integer-division indexing
        # reproduces the exact randint sequence of the original literals.
        x_first = [random.randint(*cols[k % 4]) for k in range(14)]
        x_second = [random.randint(*cols[(k + 2) % 4]) for k in range(14)]
        y_first = [random.randint(*rows[k // 4]) for k in range(14)]
        y_second = [random.randint(*rows[(k + 14) // 4]) for k in range(14)]
        # Jiggle the cursor somewhere arbitrary first.
        arbitraryMovement(random.randint(0, 560), random.randint(0, 332), 0.5)
        # Withdraw both ingredients; the coin flip decides which bank slot is
        # right-clicked ("withdraw-all") first.
        if random.randint(0, 1) == 1:
            xloc = random.randint(380, 382)
            yloc = random.randint(197, 199)
            arbitraryMovement(xloc, yloc, 0.3)
            pyautogui.click(button='right')
            sleepsegment()
            arbitraryMovement(xloc, yloc + 85, 0.5)
            pyautogui.click()
            sleepsegment()
            arbitraryMovement(
                random.randint(425, 433), random.randint(195, 201),
                random.uniform(0.25, 0.45))
            pyautogui.click()
        else:
            xloc = random.randint(425, 433)
            yloc = random.randint(195, 201)
            arbitraryMovement(xloc, yloc, random.uniform(0.25, 0.45))
            pyautogui.click(button='right')
            sleepsegment()
            arbitraryMovement(xloc, yloc + 85, 0.5)
            pyautogui.click()
            sleepsegment()
            arbitraryMovement(
                random.randint(380, 382), random.randint(197, 199),
                random.uniform(0.25, 0.45))
            pyautogui.click()
        closeBank(0, 0)
        sleepsegment()
        # Combine one random slot from each half.
        slot = random.randint(0, 13)
        arbitraryMovement(x_first[slot], y_first[slot], 0.2)
        pyautogui.click()
        sleepsegment()
        slot2 = random.randint(0, 13)
        arbitraryMovement(x_second[slot2], y_second[slot2], 0.2)
        pyautogui.click()
        sleepsegment()
        # Confirm the "make" dialog, then wait out the mixing animation.
        arbitraryMovement(random.randint(227, 290), random.randint(433, 480), 0.2)
        pyautogui.click()
        time.sleep(random.uniform(0.100, 0.200))
        pyautogui.click()
        time.sleep(random.uniform(8.800, 9.200))
        arbitraryMovement(
            random.randint(256, 353), random.randint(190, 243), 0.5)
        pyautogui.click()
        time.sleep(random.uniform(0.634, 0.780))
        # Occasionally (1 in 8) deposit slot-by-slot instead of bankAll.
        if random.randint(0, 7) == 0:
            normalDeposit(0, 0)
        else:
            bankAll(0, 0)
        time.sleep(random.uniform(0.634, 0.780))
def DualTask(x, y):
    """Full session driver: log in, alternate mixing and cleaning, log out.

    x/y are the screen offsets forwarded to login/logout for this client
    window.
    """
    time.sleep(random.uniform(5.4000, 5.500))
    login(x, y)
    time.sleep(random.uniform(1.3, 2.380))
    arbitraryMovement(
        random.randint(256, 256 + 97),
        random.randint(190, 190 + 53), 0.5
    )
    pyautogui.click()
    # Two passes of 50 mixing rounds then 50 cleaning rounds, switching bank
    # tabs between phases.
    for i in range(2):
        SoloMix(50)
        arbitraryMovement(random.randint(318, 328), random.randint(80, 85),
                          random.uniform(0.5, 1.5))
        pyautogui.leftClick()
        time.sleep(random.uniform(1.3, 2.380))
        SoloClean(50)
        arbitraryMovement(random.randint(77, 85), random.randint(82, 89),
                          random.uniform(0.5, 1.5))
        pyautogui.leftClick()
        time.sleep(random.uniform(1.3, 2.380))
    # assumes logout runs once after both passes — original indentation was
    # lost; confirm it is not meant to run per pass.
    logout(x, y)
def DoubleMix(x):
    """Run ``x`` withdraw/mix/deposit rounds on two stacked client windows
    (the second window sits 534 px below the first).

    Each round: withdraw via the bank right-click menu (two click orders,
    chosen at random, mirrored on the second screen), close both banks,
    combine one random "first half" slot with one random "second half" slot
    in each window, wait out the mixing animation, then re-open both banks
    and deposit everything.

    :param x: number of rounds to run.

    NOTE(review): all coordinates are screen-absolute and assume a fixed
    two-client layout -- verify before reuse.
    """
    # Inventory geometry: 4 columns x 7 rows = 28 slots.  Every click point
    # is re-jittered with randint inside these pixel spans so no two rounds
    # click exactly the same pixel.  These spans reproduce the eight
    # hand-written 14-element lists the function previously carried.
    col_spans = [(576, 586), (616, 630), (656, 670), (699, 716)]
    row_spans = [(247, 258), (283, 296), (321, 332), (358, 371),
                 (389, 406), (431, 444), (463, 479)]
    for i in range(x):
        # Slots 0-13 ("first half", column-cycling / row-major) and slots
        # 14-27 ("second half", starting two columns in) -- identical layout
        # to the previous literal lists.
        X_firstHalf = [random.randint(*col_spans[k % 4]) for k in range(14)]
        X_secondHalf = [random.randint(*col_spans[(k + 2) % 4]) for k in range(14)]
        Y_firstHalf = [random.randint(*row_spans[k // 4]) for k in range(14)]
        Y_secondHalf = [random.randint(*row_spans[(k + 14) // 4]) for k in range(14)]
        # Same grid for the second client window, shifted down 534 px.
        X1_firstHalf = [random.randint(*col_spans[k % 4]) for k in range(14)]
        X1_secondHalf = [random.randint(*col_spans[(k + 2) % 4]) for k in range(14)]
        Y1_firstHalf = [random.randint(*row_spans[k // 4]) + 534 for k in range(14)]
        Y1_secondHalf = [random.randint(*row_spans[(k + 14) // 4]) + 534 for k in range(14)]
        # Idle mouse wander before interacting, to look less scripted.
        arbitraryMovement(random.randint(0, 560), random.randint(0, 332), 0.5)
        Choice = random.randint(0, 1)
        if Choice == 1:
            # Variant A: right-click withdraw at (~380, ~198) first, then
            # click the second spot; mirrored at +534 for screen two.
            xlocation = random.randint(380, 382)
            ylocation = random.randint(198 - 1, 198 + 1)
            arbitraryMovement(xlocation, ylocation, 0.3)
            pyautogui.click(button='right')
            sleepsegment()
            # +85 px reaches the desired context-menu entry.
            arbitraryMovement(xlocation, ylocation + 85, 0.5)
            pyautogui.click()
            sleepsegment()
            arbitraryMovement(
                random.randint(429 - 4, 429 + 4),
                random.randint(198 - 3, 198 + 3), random.uniform(0.25, 0.45)
            )
            pyautogui.click()
            # second screen
            arbitraryMovement(xlocation, ylocation + 534, 0.3)
            pyautogui.click(button='right')
            sleepsegment()
            arbitraryMovement(xlocation, ylocation + 85 + 534, 0.5)
            pyautogui.click()
            sleepsegment()
            arbitraryMovement(
                random.randint(429 - 4, 429 + 4),
                random.randint(198 - 3, 198 + 3) + 534, random.uniform(0.25, 0.45)
            )
            pyautogui.click()
        else:
            # Variant B: same two interactions in the opposite order.
            xlocation = random.randint(429 - 4, 429 + 4)
            ylocation = random.randint(198 - 3, 198 + 3)
            arbitraryMovement(
                xlocation,
                ylocation, random.uniform(0.25, 0.45)
            )
            time.sleep(random.uniform(0.1, 0.2))
            pyautogui.click(button='right')
            sleepsegment()
            arbitraryMovement(xlocation, ylocation + 85, 0.5)
            pyautogui.click()
            sleepsegment()
            arbitraryMovement(
                random.randint(380, 382),
                random.randint(198 - 1, 198 + 1), random.uniform(0.25, 0.45)
            )
            pyautogui.click()
            # second screen
            arbitraryMovement(
                xlocation,
                ylocation + 534, random.uniform(0.25, 0.45)
            )
            time.sleep(random.uniform(0.1, 0.2))
            pyautogui.click(button='right')
            sleepsegment()
            arbitraryMovement(xlocation, ylocation + 85 + 534, 0.5)
            pyautogui.click()
            sleepsegment()
            arbitraryMovement(
                random.randint(380, 382),
                random.randint(198 - 1, 198 + 1) + 534, random.uniform(0.25, 0.45)
            )
            pyautogui.click()
        # Close both bank windows, then combine items in each client.
        closeBank(0, 0)
        closeBank(0, 534)
        sleepsegment()
        # Second (lower) client: one random slot from each half, then the
        # make-all confirmation area, double-clicked.
        temp = random.randint(0, 13)
        arbitraryMovement(X1_firstHalf[temp], Y1_firstHalf[temp], 0.2)
        pyautogui.click()
        sleepsegment()
        temp2 = random.randint(0, 13)
        arbitraryMovement(
            X1_secondHalf[temp2], Y1_secondHalf[temp2], 0.2)
        pyautogui.click()
        sleepsegment()
        arbitraryMovement(random.randint(
            227, 290), random.randint(433, 480) + 534, 0.2)
        pyautogui.click()
        time.sleep(random.uniform(0.100, 0.200))
        pyautogui.click()
        # First (upper) client: same sequence without the offset.
        temp = random.randint(0, 13)
        arbitraryMovement(X_firstHalf[temp], Y_firstHalf[temp], 0.2)
        pyautogui.click()
        sleepsegment()
        temp2 = random.randint(0, 13)
        arbitraryMovement(
            X_secondHalf[temp2], Y_secondHalf[temp2], 0.2)
        pyautogui.click()
        sleepsegment()
        arbitraryMovement(random.randint(
            227, 290), random.randint(433, 480), 0.2)
        pyautogui.click()
        time.sleep(random.uniform(0.100, 0.200))
        pyautogui.click()
        # Wait out the mixing animation, then re-open both banks and
        # deposit everything in each window.
        time.sleep(random.uniform(8.800, 9.200))
        arbitraryMovement(
            random.randint(256, 256 + 97),
            random.randint(190, 190 + 53), 0.5
        )
        pyautogui.click()
        arbitraryMovement(
            random.randint(256, 256 + 97),
            random.randint(190, 190 + 53) + 534, 0.5
        )
        pyautogui.click()
        time.sleep(random.uniform(0.634, 0.780))
        bankAll(0, 0)
        bankAll(0, 534)
        time.sleep(random.uniform(0.634, 0.780))
def speedclick():
    """Hotkey-driven manual-assist loop (runs forever).

    Keys: ``i`` click every inventory slot in random order; ``o`` click the
    bank-booth area; ``u`` close the bank; ``y`` withdraw one item;
    ``p`` deposit all.

    NOTE(review): coordinates are screen-absolute and assume the same fixed
    client layout as the rest of this file.
    """
    # Inventory geometry: 4 columns x 7 rows = 28 slots (see DoubleMix).
    col_spans = [(576, 586), (616, 630), (656, 670), (699, 716)]
    row_spans = [(247, 258), (283, 296), (321, 332), (358, 371),
                 (389, 406), (431, 444), (463, 479)]
    while True:
        # click all inventory
        if keyboard.is_pressed('i'):
            time.sleep(0.1)
            # One jittered click point per slot, column-major -- reproduces
            # the previous hand-written 28-element arrays.
            X_herbArray = [random.randint(*c) for c in col_spans for _ in row_spans]
            Y_herbArray = [random.randint(*r) for _ in col_spans for r in row_spans]
            # Visit all 28 slots in a random order.  Shuffling replaces the
            # old rejection-sampling loop, which also had an off-by-one
            # (`while counter < 27`) that left one slot unclicked.
            order = list(range(28))
            random.shuffle(order)
            for destiny in order:
                pyautogui.click(
                    button='left', x=X_herbArray[destiny], y=Y_herbArray[destiny])
        # click bank area
        if keyboard.is_pressed('o'):
            arbitraryMovement(
                random.randint(256, 256 + 97),
                random.randint(190, 190 + 53), random.uniform(0.2, 0.5)
            )
            pyautogui.click()
        # close bank
        if keyboard.is_pressed('u'):
            # BUG FIX: every other call site passes explicit offsets; the
            # bare call relied on defaults closeBank may not define.
            closeBank(0, 0)
        # click 1 thing to withdraw
        if keyboard.is_pressed('y'):
            arbitraryMovement(
                random.randint(418, 430), random.randint(193, 206), random.uniform(0.2, 0.5))
            pyautogui.click()
        # deposit all
        if keyboard.is_pressed('p'):
            # BUG FIX: bankAll(x, y) took two required positional args, so
            # the bare call raised TypeError the moment 'p' was pressed.
            bankAll(0, 0)
def bankAll(x=0, y=0):
    """Click the bank's deposit-all button.

    :param x: horizontal pixel offset for multi-client layouts (default 0).
    :param y: vertical pixel offset, e.g. 534 for the second stacked
        window (default 0).

    Defaults were added so the no-argument call site in ``speedclick``
    works; existing ``bankAll(0, 0)`` / ``bankAll(0, 534)`` callers are
    unaffected.
    """
    arbitraryMovement(
        random.randint(437, 458) + x, random.randint(333, 349) + y,
        random.uniform(0.2, 0.5))
    pyautogui.click()
# Script entry point: run 200 dual-window mixing rounds.
# (The previous block of commented-out alternative entry points --
# SoloMix / SoloHybrid / speedclick / login-logout smoke tests -- was dead
# code and has been removed; select a different task by editing this call.)
DoubleMix(200)
| 33.445815
| 102
| 0.532415
| 4,247
| 37,961
| 4.736049
| 0.05651
| 0.381326
| 0.050114
| 0.059511
| 0.877797
| 0.861788
| 0.842995
| 0.83509
| 0.817838
| 0.806702
| 0
| 0.177722
| 0.335212
| 37,961
| 1,135
| 103
| 33.445815
| 0.619314
| 0.018677
| 0
| 0.8308
| 0
| 0
| 0.007094
| 0.003977
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015198
| false
| 0.002026
| 0.004053
| 0
| 0.01925
| 0.002026
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d948b0f27a04d7380fbd16914881faee2d113d3e
| 2,686
|
py
|
Python
|
tests/edge_oplus_ominus.py
|
Golbstein/python-graphslam
|
cccc022b2f5d797f6511bda9e7dd3a24af403016
|
[
"MIT"
] | 97
|
2020-02-24T00:34:56.000Z
|
2022-03-23T11:43:19.000Z
|
tests/edge_oplus_ominus.py
|
Golbstein/python-graphslam
|
cccc022b2f5d797f6511bda9e7dd3a24af403016
|
[
"MIT"
] | 3
|
2020-02-18T15:46:40.000Z
|
2022-03-17T02:01:51.000Z
|
tests/edge_oplus_ominus.py
|
Golbstein/python-graphslam
|
cccc022b2f5d797f6511bda9e7dd3a24af403016
|
[
"MIT"
] | 13
|
2020-06-09T08:27:27.000Z
|
2021-11-23T14:05:14.000Z
|
# Copyright (c) 2020 Jeff Irion and contributors
"""A custom edge type used for unit tests.
"""
import numpy as np
from graphslam.edge.base_edge import BaseEdge
# pylint: disable=abstract-method
class EdgeOPlus(BaseEdge):
    r"""Test edge whose "error" is the :math:`\oplus` composition of its two
    vertex poses, returned as an array.
    """
    def calc_error(self):
        """Return ``(pose0 + pose1).to_array()`` for this edge's vertices."""
        pose0 = self.vertices[0].pose
        pose1 = self.vertices[1].pose
        return (pose0 + pose1).to_array()

    def calc_jacobians(self):
        """Return the Jacobians of the error w.r.t. each vertex's update."""
        pose0 = self.vertices[0].pose
        pose1 = self.vertices[1].pose
        wrt_self = pose0.jacobian_self_oplus_other_wrt_self(pose1)
        wrt_other = pose0.jacobian_self_oplus_other_wrt_other(pose1)
        return [np.dot(wrt_self, pose0.jacobian_boxplus()),
                np.dot(wrt_other, pose1.jacobian_boxplus())]
# pylint: disable=abstract-method
class EdgeOMinus(BaseEdge):
    r"""Test edge whose "error" is the :math:`\ominus` difference of its two
    vertex poses, returned as an array.
    """
    def calc_error(self):
        """Return ``(pose0 - pose1).to_array()`` for this edge's vertices."""
        pose0 = self.vertices[0].pose
        pose1 = self.vertices[1].pose
        return (pose0 - pose1).to_array()

    def calc_jacobians(self):
        """Return the Jacobians of the error w.r.t. each vertex's update."""
        pose0 = self.vertices[0].pose
        pose1 = self.vertices[1].pose
        wrt_self = pose0.jacobian_self_ominus_other_wrt_self(pose1)
        wrt_other = pose0.jacobian_self_ominus_other_wrt_other(pose1)
        return [np.dot(wrt_self, pose0.jacobian_boxplus()),
                np.dot(wrt_other, pose1.jacobian_boxplus())]
# pylint: disable=abstract-method
class EdgeOPlusCompact(BaseEdge):
    r"""Test edge whose "error" is the :math:`\oplus` composition of its two
    vertex poses, returned in compact form.
    """
    def calc_error(self):
        """Return ``(pose0 + pose1).to_compact()`` for this edge's vertices."""
        pose0 = self.vertices[0].pose
        pose1 = self.vertices[1].pose
        return (pose0 + pose1).to_compact()

    def calc_jacobians(self):
        """Return the Jacobians of the compact error w.r.t. each vertex."""
        pose0 = self.vertices[0].pose
        pose1 = self.vertices[1].pose
        wrt_self = pose0.jacobian_self_oplus_other_wrt_self_compact(pose1)
        wrt_other = pose0.jacobian_self_oplus_other_wrt_other_compact(pose1)
        return [np.dot(wrt_self, pose0.jacobian_boxplus()),
                np.dot(wrt_other, pose1.jacobian_boxplus())]
# pylint: disable=abstract-method
class EdgeOMinusCompact(BaseEdge):
    r"""Test edge whose "error" is the :math:`\ominus` difference of its two
    vertex poses, returned in compact form.
    """
    def calc_error(self):
        """Return ``(pose0 - pose1).to_compact()`` for this edge's vertices."""
        pose0 = self.vertices[0].pose
        pose1 = self.vertices[1].pose
        return (pose0 - pose1).to_compact()

    def calc_jacobians(self):
        """Return the Jacobians of the compact error w.r.t. each vertex."""
        pose0 = self.vertices[0].pose
        pose1 = self.vertices[1].pose
        wrt_self = pose0.jacobian_self_ominus_other_wrt_self_compact(pose1)
        wrt_other = pose0.jacobian_self_ominus_other_wrt_other_compact(pose1)
        return [np.dot(wrt_self, pose0.jacobian_boxplus()),
                np.dot(wrt_other, pose1.jacobian_boxplus())]
| 37.830986
| 156
| 0.69099
| 366
| 2,686
| 4.893443
| 0.153005
| 0.214405
| 0.116136
| 0.15187
| 0.902289
| 0.884422
| 0.884422
| 0.884422
| 0.884422
| 0.884422
| 0
| 0.015957
| 0.160089
| 2,686
| 70
| 157
| 38.371429
| 0.777926
| 0.212584
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.076923
| 0
| 0.846154
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
d981e9ba8041118822f2896f610b29e7022c81d9
| 27,700
|
py
|
Python
|
sdk/python/pulumi_snowflake/procedure.py
|
Hacker0x01/pulumi-snowflake
|
f6ebcf2c3f73b103a7c2001fae231998ce1323b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-07-01T17:03:33.000Z
|
2022-03-01T19:29:04.000Z
|
sdk/python/pulumi_snowflake/procedure.py
|
Hacker0x01/pulumi-snowflake
|
f6ebcf2c3f73b103a7c2001fae231998ce1323b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 102
|
2021-07-14T13:12:58.000Z
|
2022-03-31T18:34:04.000Z
|
sdk/python/pulumi_snowflake/procedure.py
|
Hacker0x01/pulumi-snowflake
|
f6ebcf2c3f73b103a7c2001fae231998ce1323b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-03-25T07:24:45.000Z
|
2022-03-25T07:24:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ProcedureArgs', 'Procedure']
# Machine-generated by the Pulumi Terraform Bridge (see file header) --
# input-argument bag for the Procedure resource; do not hand-edit logic.
@pulumi.input_type
class ProcedureArgs:
    def __init__(__self__, *,
                 database: pulumi.Input[str],
                 return_type: pulumi.Input[str],
                 schema: pulumi.Input[str],
                 statement: pulumi.Input[str],
                 arguments: Optional[pulumi.Input[Sequence[pulumi.Input['ProcedureArgumentArgs']]]] = None,
                 comment: Optional[pulumi.Input[str]] = None,
                 execute_as: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 null_input_behavior: Optional[pulumi.Input[str]] = None,
                 return_behavior: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Procedure resource.
        :param pulumi.Input[str] database: The database in which to create the procedure. Don't use the | character.
        :param pulumi.Input[str] return_type: The return type of the procedure
        :param pulumi.Input[str] schema: The schema in which to create the procedure. Don't use the | character.
        :param pulumi.Input[str] statement: Specifies the javascript code used to create the procedure.
        :param pulumi.Input[Sequence[pulumi.Input['ProcedureArgumentArgs']]] arguments: List of the arguments for the procedure
        :param pulumi.Input[str] comment: Specifies a comment for the procedure.
        :param pulumi.Input[str] execute_as: Sets execute context - see caller's rights and owner's rights
        :param pulumi.Input[str] name: Specifies the identifier for the procedure; does not have to be unique for the schema in which the procedure is created. Don't use the | character.
        :param pulumi.Input[str] null_input_behavior: Specifies the behavior of the procedure when called with null inputs.
        :param pulumi.Input[str] return_behavior: Specifies the behavior of the function when returning results
        """
        # Required properties are always stored; optional ones only when
        # supplied, so unset keys stay absent from the input bag.
        pulumi.set(__self__, "database", database)
        pulumi.set(__self__, "return_type", return_type)
        pulumi.set(__self__, "schema", schema)
        pulumi.set(__self__, "statement", statement)
        if arguments is not None:
            pulumi.set(__self__, "arguments", arguments)
        if comment is not None:
            pulumi.set(__self__, "comment", comment)
        if execute_as is not None:
            pulumi.set(__self__, "execute_as", execute_as)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if null_input_behavior is not None:
            pulumi.set(__self__, "null_input_behavior", null_input_behavior)
        if return_behavior is not None:
            pulumi.set(__self__, "return_behavior", return_behavior)

    # Property accessors below delegate to pulumi.get/pulumi.set; the
    # getter `name=` overrides map snake_case attributes to the provider's
    # camelCase keys.
    @property
    @pulumi.getter
    def database(self) -> pulumi.Input[str]:
        """
        The database in which to create the procedure. Don't use the | character.
        """
        return pulumi.get(self, "database")

    @database.setter
    def database(self, value: pulumi.Input[str]):
        pulumi.set(self, "database", value)

    @property
    @pulumi.getter(name="returnType")
    def return_type(self) -> pulumi.Input[str]:
        """
        The return type of the procedure
        """
        return pulumi.get(self, "return_type")

    @return_type.setter
    def return_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "return_type", value)

    @property
    @pulumi.getter
    def schema(self) -> pulumi.Input[str]:
        """
        The schema in which to create the procedure. Don't use the | character.
        """
        return pulumi.get(self, "schema")

    @schema.setter
    def schema(self, value: pulumi.Input[str]):
        pulumi.set(self, "schema", value)

    @property
    @pulumi.getter
    def statement(self) -> pulumi.Input[str]:
        """
        Specifies the javascript code used to create the procedure.
        """
        return pulumi.get(self, "statement")

    @statement.setter
    def statement(self, value: pulumi.Input[str]):
        pulumi.set(self, "statement", value)

    @property
    @pulumi.getter
    def arguments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ProcedureArgumentArgs']]]]:
        """
        List of the arguments for the procedure
        """
        return pulumi.get(self, "arguments")

    @arguments.setter
    def arguments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ProcedureArgumentArgs']]]]):
        pulumi.set(self, "arguments", value)

    @property
    @pulumi.getter
    def comment(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a comment for the procedure.
        """
        return pulumi.get(self, "comment")

    @comment.setter
    def comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "comment", value)

    @property
    @pulumi.getter(name="executeAs")
    def execute_as(self) -> Optional[pulumi.Input[str]]:
        """
        Sets execute context - see caller's rights and owner's rights
        """
        return pulumi.get(self, "execute_as")

    @execute_as.setter
    def execute_as(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "execute_as", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the identifier for the procedure; does not have to be unique for the schema in which the procedure is created. Don't use the | character.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="nullInputBehavior")
    def null_input_behavior(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the behavior of the procedure when called with null inputs.
        """
        return pulumi.get(self, "null_input_behavior")

    @null_input_behavior.setter
    def null_input_behavior(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "null_input_behavior", value)

    @property
    @pulumi.getter(name="returnBehavior")
    def return_behavior(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the behavior of the function when returning results
        """
        return pulumi.get(self, "return_behavior")

    @return_behavior.setter
    def return_behavior(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "return_behavior", value)
# Machine-generated by the Pulumi Terraform Bridge (see file header) --
# state bag used when looking up / filtering existing Procedure resources;
# unlike ProcedureArgs, every field here is optional. Do not hand-edit logic.
@pulumi.input_type
class _ProcedureState:
    def __init__(__self__, *,
                 arguments: Optional[pulumi.Input[Sequence[pulumi.Input['ProcedureArgumentArgs']]]] = None,
                 comment: Optional[pulumi.Input[str]] = None,
                 database: Optional[pulumi.Input[str]] = None,
                 execute_as: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 null_input_behavior: Optional[pulumi.Input[str]] = None,
                 return_behavior: Optional[pulumi.Input[str]] = None,
                 return_type: Optional[pulumi.Input[str]] = None,
                 schema: Optional[pulumi.Input[str]] = None,
                 statement: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Procedure resources.
        :param pulumi.Input[Sequence[pulumi.Input['ProcedureArgumentArgs']]] arguments: List of the arguments for the procedure
        :param pulumi.Input[str] comment: Specifies a comment for the procedure.
        :param pulumi.Input[str] database: The database in which to create the procedure. Don't use the | character.
        :param pulumi.Input[str] execute_as: Sets execute context - see caller's rights and owner's rights
        :param pulumi.Input[str] name: Specifies the identifier for the procedure; does not have to be unique for the schema in which the procedure is created. Don't use the | character.
        :param pulumi.Input[str] null_input_behavior: Specifies the behavior of the procedure when called with null inputs.
        :param pulumi.Input[str] return_behavior: Specifies the behavior of the function when returning results
        :param pulumi.Input[str] return_type: The return type of the procedure
        :param pulumi.Input[str] schema: The schema in which to create the procedure. Don't use the | character.
        :param pulumi.Input[str] statement: Specifies the javascript code used to create the procedure.
        """
        # Only store the fields actually supplied, so unset keys stay
        # absent from the state bag.
        if arguments is not None:
            pulumi.set(__self__, "arguments", arguments)
        if comment is not None:
            pulumi.set(__self__, "comment", comment)
        if database is not None:
            pulumi.set(__self__, "database", database)
        if execute_as is not None:
            pulumi.set(__self__, "execute_as", execute_as)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if null_input_behavior is not None:
            pulumi.set(__self__, "null_input_behavior", null_input_behavior)
        if return_behavior is not None:
            pulumi.set(__self__, "return_behavior", return_behavior)
        if return_type is not None:
            pulumi.set(__self__, "return_type", return_type)
        if schema is not None:
            pulumi.set(__self__, "schema", schema)
        if statement is not None:
            pulumi.set(__self__, "statement", statement)

    # Property accessors below delegate to pulumi.get/pulumi.set; the
    # getter `name=` overrides map snake_case attributes to the provider's
    # camelCase keys.
    @property
    @pulumi.getter
    def arguments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ProcedureArgumentArgs']]]]:
        """
        List of the arguments for the procedure
        """
        return pulumi.get(self, "arguments")

    @arguments.setter
    def arguments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ProcedureArgumentArgs']]]]):
        pulumi.set(self, "arguments", value)

    @property
    @pulumi.getter
    def comment(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a comment for the procedure.
        """
        return pulumi.get(self, "comment")

    @comment.setter
    def comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "comment", value)

    @property
    @pulumi.getter
    def database(self) -> Optional[pulumi.Input[str]]:
        """
        The database in which to create the procedure. Don't use the | character.
        """
        return pulumi.get(self, "database")

    @database.setter
    def database(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database", value)

    @property
    @pulumi.getter(name="executeAs")
    def execute_as(self) -> Optional[pulumi.Input[str]]:
        """
        Sets execute context - see caller's rights and owner's rights
        """
        return pulumi.get(self, "execute_as")

    @execute_as.setter
    def execute_as(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "execute_as", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the identifier for the procedure; does not have to be unique for the schema in which the procedure is created. Don't use the | character.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="nullInputBehavior")
    def null_input_behavior(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the behavior of the procedure when called with null inputs.
        """
        return pulumi.get(self, "null_input_behavior")

    @null_input_behavior.setter
    def null_input_behavior(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "null_input_behavior", value)

    @property
    @pulumi.getter(name="returnBehavior")
    def return_behavior(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the behavior of the function when returning results
        """
        return pulumi.get(self, "return_behavior")

    @return_behavior.setter
    def return_behavior(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "return_behavior", value)

    @property
    @pulumi.getter(name="returnType")
    def return_type(self) -> Optional[pulumi.Input[str]]:
        """
        The return type of the procedure
        """
        return pulumi.get(self, "return_type")

    @return_type.setter
    def return_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "return_type", value)

    @property
    @pulumi.getter
    def schema(self) -> Optional[pulumi.Input[str]]:
        """
        The schema in which to create the procedure. Don't use the | character.
        """
        return pulumi.get(self, "schema")

    @schema.setter
    def schema(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "schema", value)

    @property
    @pulumi.getter
    def statement(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the javascript code used to create the procedure.
        """
        return pulumi.get(self, "statement")

    @statement.setter
    def statement(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statement", value)
class Procedure(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
arguments: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProcedureArgumentArgs']]]]] = None,
comment: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
execute_as: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
null_input_behavior: Optional[pulumi.Input[str]] = None,
return_behavior: Optional[pulumi.Input[str]] = None,
return_type: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None,
statement: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Example Usage
```python
import pulumi
import pulumi_snowflake as snowflake
db = snowflake.Schema("db", data_retention_days=1)
schema = snowflake.Schema("schema",
database=snowflake_database["db"]["name"],
data_retention_days=1)
proc = snowflake.Procedure("proc",
database=snowflake_database["db"]["name"],
schema=schema.name,
arguments=[
snowflake.ProcedureArgumentArgs(
name="arg1",
type="varchar",
),
snowflake.ProcedureArgumentArgs(
name="arg2",
type="DATE",
),
],
comment="Procedure with 2 arguments",
return_type="VARCHAR",
execute_as="CALLER",
return_behavior="IMMUTABLE",
null_input_behavior="RETURNS NULL ON NULL INPUT",
statement=\"\"\"var X=1
return X
\"\"\")
```
## Import
# format is database name | schema name | stored procedure name | <list of arg types, separated with '-'>
```sh
$ pulumi import snowflake:index/procedure:Procedure example 'dbName|schemaName|procedureName|varchar-varchar-varchar'
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProcedureArgumentArgs']]]] arguments: List of the arguments for the procedure
:param pulumi.Input[str] comment: Specifies a comment for the procedure.
:param pulumi.Input[str] database: The database in which to create the procedure. Don't use the | character.
:param pulumi.Input[str] execute_as: Sets execute context - see caller's rights and owner's rights
:param pulumi.Input[str] name: Specifies the identifier for the procedure; does not have to be unique for the schema in which the procedure is created. Don't use the | character.
:param pulumi.Input[str] null_input_behavior: Specifies the behavior of the procedure when called with null inputs.
:param pulumi.Input[str] return_behavior: Specifies the behavior of the function when returning results
:param pulumi.Input[str] return_type: The return type of the procedure
:param pulumi.Input[str] schema: The schema in which to create the procedure. Don't use the | character.
:param pulumi.Input[str] statement: Specifies the javascript code used to create the procedure.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ProcedureArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Example Usage
```python
import pulumi
import pulumi_snowflake as snowflake
db = snowflake.Schema("db", data_retention_days=1)
schema = snowflake.Schema("schema",
database=snowflake_database["db"]["name"],
data_retention_days=1)
proc = snowflake.Procedure("proc",
database=snowflake_database["db"]["name"],
schema=schema.name,
arguments=[
snowflake.ProcedureArgumentArgs(
name="arg1",
type="varchar",
),
snowflake.ProcedureArgumentArgs(
name="arg2",
type="DATE",
),
],
comment="Procedure with 2 arguments",
return_type="VARCHAR",
execute_as="CALLER",
return_behavior="IMMUTABLE",
null_input_behavior="RETURNS NULL ON NULL INPUT",
statement=\"\"\"var X=1
return X
\"\"\")
```
## Import
# format is database name | schema name | stored procedure name | <list of arg types, separated with '-'>
```sh
$ pulumi import snowflake:index/procedure:Procedure example 'dbName|schemaName|procedureName|varchar-varchar-varchar'
```
:param str resource_name: The name of the resource.
:param ProcedureArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProcedureArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
arguments: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProcedureArgumentArgs']]]]] = None,
comment: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
execute_as: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
null_input_behavior: Optional[pulumi.Input[str]] = None,
return_behavior: Optional[pulumi.Input[str]] = None,
return_type: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None,
statement: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProcedureArgs.__new__(ProcedureArgs)
__props__.__dict__["arguments"] = arguments
__props__.__dict__["comment"] = comment
if database is None and not opts.urn:
raise TypeError("Missing required property 'database'")
__props__.__dict__["database"] = database
__props__.__dict__["execute_as"] = execute_as
__props__.__dict__["name"] = name
__props__.__dict__["null_input_behavior"] = null_input_behavior
__props__.__dict__["return_behavior"] = return_behavior
if return_type is None and not opts.urn:
raise TypeError("Missing required property 'return_type'")
__props__.__dict__["return_type"] = return_type
if schema is None and not opts.urn:
raise TypeError("Missing required property 'schema'")
__props__.__dict__["schema"] = schema
if statement is None and not opts.urn:
raise TypeError("Missing required property 'statement'")
__props__.__dict__["statement"] = statement
super(Procedure, __self__).__init__(
'snowflake:index/procedure:Procedure',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        arguments: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProcedureArgumentArgs']]]]] = None,
        comment: Optional[pulumi.Input[str]] = None,
        database: Optional[pulumi.Input[str]] = None,
        execute_as: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        null_input_behavior: Optional[pulumi.Input[str]] = None,
        return_behavior: Optional[pulumi.Input[str]] = None,
        return_type: Optional[pulumi.Input[str]] = None,
        schema: Optional[pulumi.Input[str]] = None,
        statement: Optional[pulumi.Input[str]] = None) -> 'Procedure':
    """
    Look up an existing Procedure resource by name and provider id, with
    optional extra state properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param arguments: List of the arguments for the procedure.
    :param comment: Specifies a comment for the procedure.
    :param database: The database in which to create the procedure. Don't use the | character.
    :param execute_as: Sets execute context - see caller's rights and owner's rights.
    :param name: Identifier for the procedure; need not be unique within the schema. Don't use the | character.
    :param null_input_behavior: Behavior of the procedure when called with null inputs.
    :param return_behavior: Behavior of the function when returning results.
    :param return_type: The return type of the procedure.
    :param schema: The schema in which to create the procedure. Don't use the | character.
    :param statement: The javascript code used to create the procedure.
    """
    # Pin the lookup to the supplied provider id.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    state = _ProcedureState.__new__(_ProcedureState)
    for prop_name, prop_value in (
            ("arguments", arguments),
            ("comment", comment),
            ("database", database),
            ("execute_as", execute_as),
            ("name", name),
            ("null_input_behavior", null_input_behavior),
            ("return_behavior", return_behavior),
            ("return_type", return_type),
            ("schema", schema),
            ("statement", statement)):
        state.__dict__[prop_name] = prop_value
    return Procedure(resource_name, opts=opts, __props__=state)
@property
@pulumi.getter
def arguments(self) -> pulumi.Output[Optional[Sequence['outputs.ProcedureArgument']]]:
    """List of the arguments for the procedure."""
    return pulumi.get(self, "arguments")
@property
@pulumi.getter
def comment(self) -> pulumi.Output[Optional[str]]:
    """Specifies a comment for the procedure."""
    return pulumi.get(self, "comment")
@property
@pulumi.getter
def database(self) -> pulumi.Output[str]:
    """The database in which to create the procedure. Don't use the | character."""
    return pulumi.get(self, "database")
@property
@pulumi.getter(name="executeAs")
def execute_as(self) -> pulumi.Output[Optional[str]]:
    """Sets execute context - see caller's rights and owner's rights."""
    return pulumi.get(self, "execute_as")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """Identifier for the procedure; need not be unique within the schema. Don't use the | character."""
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="nullInputBehavior")
def null_input_behavior(self) -> pulumi.Output[Optional[str]]:
    """Specifies the behavior of the procedure when called with null inputs."""
    return pulumi.get(self, "null_input_behavior")
@property
@pulumi.getter(name="returnBehavior")
def return_behavior(self) -> pulumi.Output[Optional[str]]:
    """Specifies the behavior of the function when returning results."""
    return pulumi.get(self, "return_behavior")
@property
@pulumi.getter(name="returnType")
def return_type(self) -> pulumi.Output[str]:
    """The return type of the procedure."""
    return pulumi.get(self, "return_type")
@property
@pulumi.getter
def schema(self) -> pulumi.Output[str]:
    """The schema in which to create the procedure. Don't use the | character."""
    return pulumi.get(self, "schema")
@property
@pulumi.getter
def statement(self) -> pulumi.Output[str]:
    """Specifies the javascript code used to create the procedure."""
    return pulumi.get(self, "statement")
| 42.290076
| 186
| 0.63231
| 3,181
| 27,700
| 5.328199
| 0.060358
| 0.095404
| 0.098295
| 0.089563
| 0.89433
| 0.871202
| 0.854682
| 0.833737
| 0.820048
| 0.811375
| 0
| 0.000637
| 0.263141
| 27,700
| 654
| 187
| 42.35474
| 0.829749
| 0.338159
| 0
| 0.756374
| 1
| 0
| 0.09743
| 0.015022
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161473
| false
| 0.002833
| 0.01983
| 0
| 0.27762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
796203c4a4e3699aace24f7c46f2b0f75e203180
| 159
|
py
|
Python
|
src/statue/cli/config/config_cli.py
|
EddLabs/eddington-static
|
cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5
|
[
"Apache-2.0"
] | null | null | null |
src/statue/cli/config/config_cli.py
|
EddLabs/eddington-static
|
cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5
|
[
"Apache-2.0"
] | null | null | null |
src/statue/cli/config/config_cli.py
|
EddLabs/eddington-static
|
cdd1d9514c4eea1bd06c24894b3922e6cc3fb1f5
|
[
"Apache-2.0"
] | null | null | null |
"""Configuration main CLI."""
from statue.cli.cli import statue_cli
# Click command group: the docstring below is user-visible — Click renders it
# as the help text for `statue config`, so it is runtime behavior, not a comment.
@statue_cli.group("config")
def config_cli():
    """Configuration related actions."""
| 15.9
| 40
| 0.710692
| 20
| 159
| 5.5
| 0.55
| 0.245455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 159
| 9
| 41
| 17.666667
| 0.797101
| 0.339623
| 0
| 0
| 0
| 0
| 0.06383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
796669d31a7265308d5c1fc7ee7e7581a2a96bdf
| 164
|
py
|
Python
|
src/game/__init__.py
|
gabdube/panic-panda
|
ecbda506eeafe3dbdf932cdb20b938646502f892
|
[
"MIT"
] | 67
|
2019-01-06T13:01:46.000Z
|
2022-01-04T17:50:58.000Z
|
src/game/__init__.py
|
gabdube/panic-panda
|
ecbda506eeafe3dbdf932cdb20b938646502f892
|
[
"MIT"
] | 2
|
2019-01-07T18:25:00.000Z
|
2021-05-10T09:32:17.000Z
|
src/game/__init__.py
|
gabdube/panic-panda
|
ecbda506eeafe3dbdf932cdb20b938646502f892
|
[
"MIT"
] | 6
|
2019-07-31T08:16:26.000Z
|
2020-12-26T04:34:52.000Z
|
from .main_scene import *
from .debug_textures_scene import *
from .debug_normals_scene import *
from .debug_pbr2_scene import *
from .debug_compute_scene import *
| 27.333333
| 35
| 0.817073
| 24
| 164
| 5.208333
| 0.375
| 0.44
| 0.48
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006944
| 0.121951
| 164
| 5
| 36
| 32.8
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
796e21d5f0bcc77ee701fc3c69988452442a415f
| 11,175
|
py
|
Python
|
load_confounds/tests/test_strategies.py
|
htwangtw/load_confounds
|
7f04dbbc7da0074d5de28585775fa5d5a4521d9f
|
[
"MIT"
] | null | null | null |
load_confounds/tests/test_strategies.py
|
htwangtw/load_confounds
|
7f04dbbc7da0074d5de28585775fa5d5a4521d9f
|
[
"MIT"
] | null | null | null |
load_confounds/tests/test_strategies.py
|
htwangtw/load_confounds
|
7f04dbbc7da0074d5de28585775fa5d5a4521d9f
|
[
"MIT"
] | null | null | null |
"""Test predefined denoising strategies."""
import os
import re
import load_confounds.strategies as lc
import numpy as np
# Fixture directory shipped inside the load_confounds package.
path_data = os.path.join(os.path.dirname(lc.__file__), "data")
# Sample confounds file used by every test below
# (presumably fMRIPrep-style output, judging by the desc-confounds naming — confirm).
file_confounds = os.path.join(path_data, "test_desc-confounds_regressors.tsv")
def test_Params2():
    """Test the Params2 strategy."""
    # Load the fixture with the predefined strategy.
    strategy = lc.Params2()
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = ["cosine00", "cosine01", "cosine02", "cosine03", "csf", "white_matter"]
    for column in expected:
        assert column in strategy.columns_
def test_Params6():
    """Test the Params6 strategy."""
    strategy = lc.Params6()
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z", "cosine00"]
    for column in expected:
        assert column in strategy.columns_
    # Loading a list of files must yield a list of arrays, one per file.
    strategy.load([file_confounds, file_confounds])
    assert isinstance(strategy.confounds_, list)
    assert isinstance(strategy.confounds_[0], np.ndarray)
    assert len(strategy.confounds_) == 2
def test_Params9():
    """Test the Params9 strategy."""
    strategy = lc.Params9()
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = [
        "trans_y", "trans_z", "rot_z", "cosine00",
        "csf", "white_matter", "global_signal",
    ]
    for column in expected:
        assert column in strategy.columns_
def test_Params9Scrub():
    """Test the Params9Scrub strategy."""
    strategy = lc.Params9Scrub(fd_thresh=0.15)
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = [
        "trans_y", "trans_z", "rot_z", "cosine00",
        "csf", "white_matter", "motion_outlier_0", "motion_outlier_1",
    ]
    for column in expected:
        assert column in strategy.columns_
    # Very liberal scrubbing thresholds must not raise, and must
    # flag no motion outliers at all.
    strategy = lc.Params9Scrub(fd_thresh=1, std_dvars_thresh=5)
    strategy.load(file_confounds)
    assert "motion_outlier_0" not in strategy.columns_
def test_Params24():
    """Test the Params24 strategy."""
    strategy = lc.Params24()
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = [
        "trans_x", "trans_y", "rot_z",
        "trans_x_derivative1", "trans_x_power2", "trans_x_derivative1_power2",
        "trans_z_derivative1", "trans_z_power2",
        "rot_x_power2", "rot_y_power2", "rot_y_derivative1_power2",
        "rot_z_derivative1", "cosine00",
    ]
    for column in expected:
        assert column in strategy.columns_
def test_Params36():
    """Test the Params36 strategy."""
    strategy = lc.Params36()
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = [
        "trans_x", "trans_y", "rot_z",
        "trans_x_derivative1", "trans_x_power2", "trans_x_derivative1_power2",
        "trans_y_derivative1", "trans_y_power2", "trans_y_derivative1_power2",
        "trans_z_derivative1", "trans_z_power2",
        "rot_z_derivative1", "rot_z_power2", "rot_z_derivative1_power2",
        "cosine00", "cosine01",
        "csf", "white_matter",
        "csf_derivative1", "csf_power2", "csf_derivative1_power2",
        "white_matter_derivative1",
        "global_signal", "global_signal_derivative1",
        "global_signal_power2", "global_signal_derivative1_power2",
    ]
    for column in expected:
        assert column in strategy.columns_
def test_Params36Scrub():
    """Test the Params36Scrub strategy."""
    strategy = lc.Params36Scrub(fd_thresh=0.15)
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = [
        "trans_x", "trans_y", "rot_z",
        "trans_x_derivative1", "trans_x_power2", "trans_x_derivative1_power2",
        "trans_y_derivative1", "trans_y_power2", "trans_y_derivative1_power2",
        "trans_z_derivative1", "trans_z_power2",
        "rot_z_derivative1", "rot_z_power2", "rot_z_derivative1_power2",
        "cosine00", "cosine01",
        "csf", "white_matter",
        "csf_derivative1", "csf_power2", "csf_derivative1_power2",
        "white_matter_derivative1",
        "motion_outlier_0", "motion_outlier_1",
    ]
    for column in expected:
        assert column in strategy.columns_
    # Very liberal scrubbing thresholds must not raise, and must
    # flag no motion outliers at all.
    strategy = lc.Params36Scrub(fd_thresh=1, std_dvars_thresh=5)
    strategy.load(file_confounds)
    assert "motion_outlier_0" not in strategy.columns_
def test_AnatCompCor():
    """Test the AnatCompCor strategy."""
    strategy = lc.AnatCompCor()
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = [
        "trans_x", "trans_y", "rot_z",
        "trans_x_derivative1", "trans_x_power2",
        "trans_y_derivative1_power2",
        "trans_z_derivative1", "trans_z_power2", "trans_z_derivative1_power2",
        "rot_y_derivative1", "rot_y_power2",
        "rot_z_power2", "rot_z_derivative1_power2",
        "cosine00", "cosine01",
        "a_comp_cor_00", "a_comp_cor_01", "a_comp_cor_02",
    ]
    for column in expected:
        assert column in strategy.columns_
    joined_columns = "".join(strategy.columns_)
    # No temporal compcor components in an anatomical strategy.
    assert "t_comp_cor_" not in joined_columns
    # Only combined-mask components should be present; a_comp_cor_57
    # belongs to a separate-mask decomposition and must be absent.
    assert "a_comp_cor_57" not in joined_columns
def test_AnatCompCor_not_combined():
    """Test the AnatCompCor strategy without combined mask."""
    strategy = lc.AnatCompCor(acompcor_combined=False, n_compcor=5)
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = [
        "trans_x", "trans_y", "rot_z",
        "trans_x_derivative1", "trans_x_power2",
        "trans_y_derivative1_power2",
        "trans_z_derivative1", "trans_z_power2", "trans_z_derivative1_power2",
        "rot_y_derivative1", "rot_y_power2",
        "rot_z_power2", "rot_z_derivative1_power2",
        "cosine00", "cosine01",
        "a_comp_cor_57",  # from CSF mask
        "a_comp_cor_61",  # from CSF mask
        "a_comp_cor_70",  # from WM mask
        "a_comp_cor_74",  # from WM mask
    ]
    for column in expected:
        assert column in strategy.columns_
    joined_columns = "".join(strategy.columns_)
    assert "t_comp_cor_" not in joined_columns
    # a_comp_cor_00 belongs to the combined mask and must be absent.
    assert "a_comp_cor_00" not in joined_columns
    # Components beyond the requested n_compcor must be absent.
    assert "a_comp_cor_62" not in joined_columns
    assert "a_comp_cor_75" not in joined_columns
def test_TempCompCor():
    """Test the TempCompCor strategy."""
    strategy = lc.TempCompCor()
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = [
        "cosine00", "cosine01", "cosine02", "cosine03",
        "t_comp_cor_00", "t_comp_cor_01", "t_comp_cor_02", "t_comp_cor_03",
    ]
    for column in expected:
        assert column in strategy.columns_
    # No anatomical compcor components in a temporal strategy.
    joined_columns = "".join(strategy.columns_)
    assert "a_comp_cor_" not in joined_columns
def test_FullCompCor():
    """Test a full compcor strategy."""
    # Not a predefined strategy, but expressible through the flexible API.
    strategy = lc.Confounds(["compcor"], compcor="full", acompcor_combined=False)
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    # Every expected regressor must have been selected.
    expected = [
        "t_comp_cor_00", "t_comp_cor_01", "t_comp_cor_02", "t_comp_cor_03",
        "a_comp_cor_57",  # from CSF mask
        "a_comp_cor_58",  # from CSF mask
        "a_comp_cor_105",  # from WM mask
    ]
    for column in expected:
        assert column in strategy.columns_
def test_ICAAROMA():
    """Test the (non-aggressive) ICA-AROMA strategy."""
    strategy = lc.ICAAROMA()
    strategy.load(file_confounds)
    assert isinstance(strategy.confounds_, np.ndarray)
    fixed_names = ["csf", "white_matter", "global_signal"]
    # Each selected column must be either a fixed-name regressor
    # or a cosine drift term.
    for column in strategy.columns_:
        assert column in fixed_names or re.match("cosine+", column)
def test_AROMAGSR():
    """Test the (non-aggressive) AROMA-GSR strategy."""
    strategy = lc.AROMAGSR()
    strategy.load(file_confounds)
    fixed_names = ["csf", "white_matter", "global_signal"]
    # Each selected column must be either a fixed-name regressor
    # or a cosine drift term.
    for column in strategy.columns_:
        assert column in fixed_names or re.match("cosine+", column)
def test_AggrICAAROMA():
    """Test the aggressive ICA-AROMA strategy."""
    strategy = lc.AggrICAAROMA()
    strategy.load(file_confounds)
    fixed_names = ["csf", "white_matter", "global_signal"]
    # Each selected column must be a fixed-name regressor, a cosine
    # drift term, or an AROMA motion component.
    for column in strategy.columns_:
        assert (column in fixed_names
                or re.match("cosine+", column)
                or re.match("aroma_motion_+", column))
| 26.992754
| 78
| 0.630604
| 1,382
| 11,175
| 4.801737
| 0.11288
| 0.037975
| 0.030741
| 0.053797
| 0.80425
| 0.79129
| 0.753617
| 0.753617
| 0.749247
| 0.745178
| 0
| 0.02666
| 0.274989
| 11,175
| 413
| 79
| 27.058111
| 0.792397
| 0.204832
| 0
| 0.735974
| 0
| 0
| 0.246722
| 0.061224
| 0
| 0
| 0
| 0
| 0.125413
| 1
| 0.046205
| false
| 0
| 0.013201
| 0
| 0.059406
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
797185fb27f771e694cf1e1abd9ec62d38efb036
| 46
|
py
|
Python
|
ocetrac/_version.py
|
ocetrac/ocetrac
|
0aca97977a6286c511c530c42352bc80c685b22e
|
[
"MIT"
] | 22
|
2021-06-22T15:17:20.000Z
|
2022-03-04T18:40:16.000Z
|
ocetrac/_version.py
|
ocetrac/ocetrac
|
0aca97977a6286c511c530c42352bc80c685b22e
|
[
"MIT"
] | 25
|
2021-04-16T13:43:16.000Z
|
2022-02-01T05:10:29.000Z
|
ocetrac/_version.py
|
ocetrac/ocetrac
|
0aca97977a6286c511c530c42352bc80c685b22e
|
[
"MIT"
] | 4
|
2021-04-22T14:43:42.000Z
|
2021-11-24T22:09:59.000Z
|
# NOTE(review): the dev/+g<sha>.d<date> shape suggests this string is emitted
# by an automated versioning tool (versioneer/setuptools-scm style) — confirm
# before editing by hand.
__version__ = "0.1.4.dev32+g964124d.d20210818"
| 46
| 46
| 0.782609
| 7
| 46
| 4.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.431818
| 0.043478
| 46
| 1
| 46
| 46
| 0.295455
| 0
| 0
| 0
| 0
| 0
| 0.638298
| 0.638298
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
79873d6396b001348480ee3936ddb4652d520b57
| 1,274
|
py
|
Python
|
tests/testsuite/contrib/test_admin.py
|
danlamanna/django-rules
|
27656602e83dd7fdef1b4c74f1ea80399b686380
|
[
"MIT"
] | 3
|
2019-09-28T20:39:11.000Z
|
2019-11-14T14:21:54.000Z
|
tests/testsuite/contrib/test_admin.py
|
danlamanna/django-rules
|
27656602e83dd7fdef1b4c74f1ea80399b686380
|
[
"MIT"
] | null | null | null |
tests/testsuite/contrib/test_admin.py
|
danlamanna/django-rules
|
27656602e83dd7fdef1b4c74f1ea80399b686380
|
[
"MIT"
] | 1
|
2017-12-06T17:17:25.000Z
|
2017-12-06T17:17:25.000Z
|
from django.test import TestCase
from django.urls import reverse
from . import TestData
class ModelAdminTests(TestData, TestCase):
    """Per-object permission checks for the book model admin views."""

    def _status_for(self, username, url_name):
        # Log in as the given user and GET the admin view for book pk=1,
        # returning the HTTP status code.
        self.assertTrue(self.client.login(username=username, password='secr3t'))
        response = self.client.get(reverse(url_name, args=(1,)))
        return response.status_code

    def test_change_book(self):
        # adrian can change his book as its author
        self.assertEqual(self._status_for('adrian', 'admin:testapp_book_change'), 200)
        # martin can change adrian's book as an editor
        self.assertEqual(self._status_for('martin', 'admin:testapp_book_change'), 200)

    def test_delete_book(self):
        # martin can *not* delete adrian's book
        self.assertEqual(self._status_for('martin', 'admin:testapp_book_delete'), 403)
        # adrian can delete his book as its author
        self.assertEqual(self._status_for('adrian', 'admin:testapp_book_delete'), 200)
| 43.931034
| 83
| 0.698587
| 163
| 1,274
| 5.361963
| 0.263804
| 0.091533
| 0.08238
| 0.10984
| 0.715103
| 0.715103
| 0.715103
| 0.715103
| 0.71167
| 0.71167
| 0
| 0.019102
| 0.178179
| 1,274
| 28
| 84
| 45.5
| 0.815664
| 0.128728
| 0
| 0.611111
| 0
| 0
| 0.133937
| 0.090498
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0.111111
| false
| 0.222222
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
8de702237ef90b7053f7430d9b1be67c3c8ee4a2
| 12,385
|
py
|
Python
|
app/core/gopup/index/index_baidu.py
|
ZhouRR/quotations-gateway-api
|
ef433fe8e461344a6c59e5edec206ad4ba7eeff6
|
[
"Apache-2.0"
] | null | null | null |
app/core/gopup/index/index_baidu.py
|
ZhouRR/quotations-gateway-api
|
ef433fe8e461344a6c59e5edec206ad4ba7eeff6
|
[
"Apache-2.0"
] | null | null | null |
app/core/gopup/index/index_baidu.py
|
ZhouRR/quotations-gateway-api
|
ef433fe8e461344a6c59e5edec206ad4ba7eeff6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/21 0021
# @Author : justin.郑 3907721@qq.com
# @File : index_baidu.py
# @Desc : 获取百度指数
import json
import urllib.parse
import pandas as pd
import requests
def decrypt(t: str, e: str) -> str:
    """Decode an obfuscated Baidu-index data payload.

    The key ``t`` encodes a substitution table: its first half lists the
    cipher characters and its second half the corresponding plain
    characters (position i of the first half maps to position i of the
    second half).  Each character of ``e`` is replaced through that table.

    :param t: substitution key (even length assumed — TODO confirm upstream)
    :param e: encrypted data string; every character must appear in the
        first half of ``t``, otherwise KeyError is raised
    :return: the decoded string
    """
    # Build the cipher->plain table once, then translate character by character.
    half = int(len(t) / 2)
    table = dict(zip(t[:half], t[half:]))
    return "".join(table[ch] for ch in e)
def get_ptbk(uniqid: str, cookie: str) -> str:
    """Fetch the ``ptbk`` decryption key for a request's ``uniqid``.

    The returned key is the ``t`` argument expected by ``decrypt``.
    Requires a valid index.baidu.com cookie.
    """
    request_headers = {
        "Accept": "application/json, text/plain, */*",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "Connection": "keep-alive",
        "Cookie": cookie,
        "Host": "index.baidu.com",
        "Referer": "http://index.baidu.com/v2/main/index.html",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
    }
    session = requests.Session()
    session.headers.update(request_headers)
    with session.get(
        url=f"http://index.baidu.com/Interface/ptbk?uniqid={uniqid}"
    ) as response:
        return response.json()["data"]
def baidu_interest_index(word, cookie):
    """Baidu index: audience interest distribution for a keyword.

    :param word: keyword to query
    :param cookie: authenticated zhishu.baidu.com cookie string
    :return: DataFrame with columns
        desc       interest category
        tgi        TGI index
        word_rate  distribution ratio for the keyword
        all_rate   distribution ratio across the whole web
        period     date range of the sample
        or None when the request or response parsing fails
    """
    try:
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Cookie": cookie,
            "DNT": "1",
            "Host": "zhishu.baidu.com",
            "Pragma": "no-cache",
            "Proxy-Connection": "keep-alive",
            "Referer": "zhishu.baidu.com",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
        }
        url = "http://index.baidu.com/api/SocialApi/interest?wordlist[]=%s" % word
        r = requests.get(url=url, headers=headers)
        data = json.loads(r.text)['data']
        period = "%s|%s" % (data['startDate'], data['endDate'])
        # result[0] holds the keyword's profile, result[1] the whole-web baseline.
        age_list = data['result'][0]['interest']
        age_df = pd.DataFrame(age_list)
        all_list = data['result'][1]['interest']
        all_df = pd.DataFrame(all_list)
        all_df.drop(["tgi", "typeId"], axis=1, inplace=True)
        res_df = pd.merge(age_df, all_df, on='desc')
        res_df['period'] = period
        res_df.drop(["typeId"], axis=1, inplace=True)
        res_df.rename(columns={'rate_x': 'word_rate', 'rate_y': 'all_rate'}, inplace=True)
        return res_df
    except Exception:
        # Best-effort scraper: narrow the old bare except so Ctrl-C and
        # SystemExit still propagate; any API/schema failure yields None.
        return None
def baidu_gender_index(word, cookie):
    """Baidu index: audience gender distribution for a keyword.

    :param word: keyword to query
    :param cookie: authenticated zhishu.baidu.com cookie string
    :return: DataFrame with columns
        desc       gender
        tgi        TGI index
        word_rate  distribution ratio for the keyword
        all_rate   distribution ratio across the whole web
        period     date range of the sample
        or None when the request or response parsing fails
    """
    try:
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Cookie": cookie,
            "DNT": "1",
            "Host": "zhishu.baidu.com",
            "Pragma": "no-cache",
            "Proxy-Connection": "keep-alive",
            "Referer": "zhishu.baidu.com",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
        }
        url = "http://index.baidu.com/api/SocialApi/baseAttributes?wordlist[]=%s" % word
        r = requests.get(url=url, headers=headers)
        data = json.loads(r.text)['data']
        period = "%s|%s" % (data['startDate'], data['endDate'])
        # result[0] holds the keyword's profile, result[1] the whole-web baseline.
        age_list = data['result'][0]['gender']
        age_df = pd.DataFrame(age_list)
        all_list = data['result'][1]['gender']
        all_df = pd.DataFrame(all_list)
        all_df.drop(["tgi", "typeId"], axis=1, inplace=True)
        res_df = pd.merge(age_df, all_df, on='desc')
        res_df['period'] = period
        res_df.drop(["typeId"], axis=1, inplace=True)
        res_df.rename(columns={'rate_x': 'word_rate', 'rate_y': 'all_rate'}, inplace=True)
        return res_df
    except Exception:
        # Narrowed from a bare except; see baidu_interest_index.
        return None
def baidu_age_index(word, cookie):
    """Baidu index: audience age distribution for a keyword.

    :param word: keyword to query
    :param cookie: authenticated zhishu.baidu.com cookie string
    :return: DataFrame with columns
        desc       age bracket
        tgi        TGI index
        word_rate  distribution ratio for the keyword
        all_rate   distribution ratio across the whole web
        period     date range of the sample
        or None when the request or response parsing fails
    """
    try:
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Cookie": cookie,
            "DNT": "1",
            "Host": "zhishu.baidu.com",
            "Pragma": "no-cache",
            "Proxy-Connection": "keep-alive",
            "Referer": "zhishu.baidu.com",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
        }
        url = "http://index.baidu.com/api/SocialApi/baseAttributes?wordlist[]=%s" % word
        r = requests.get(url=url, headers=headers)
        data = json.loads(r.text)['data']
        period = "%s|%s" % (data['startDate'], data['endDate'])
        # result[0] holds the keyword's profile, result[1] the whole-web baseline.
        age_list = data['result'][0]['age']
        age_df = pd.DataFrame(age_list)
        all_list = data['result'][1]['age']
        all_df = pd.DataFrame(all_list)
        all_df.drop(["tgi", "typeId"], axis=1, inplace=True)
        res_df = pd.merge(age_df, all_df, on='desc')
        res_df['period'] = period
        res_df.drop(["typeId"], axis=1, inplace=True)
        res_df.rename(columns={'rate_x': 'word_rate', 'rate_y': 'all_rate'}, inplace=True)
        return res_df
    except Exception:
        # Narrowed from a bare except; see baidu_interest_index.
        return None
def baidu_atlas_index(word, cookie, date=None):
    """Baidu index: demand graph (related search terms) for a keyword.

    :param word: keyword to query
    :param cookie: authenticated zhishu.baidu.com cookie string
    :param date: period identifier; None (default) selects the latest period
    :return: DataFrame with columns
        word    related search term
        pv      search heat
        ratio   change rate of search volume
        period  date range of the sample
        or None when the request or response parsing fails
    """
    try:
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Cookie": cookie,
            "DNT": "1",
            "Host": "zhishu.baidu.com",
            "Pragma": "no-cache",
            "Proxy-Connection": "keep-alive",
            "Referer": "zhishu.baidu.com",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
        }
        if date is None:  # was `== None`; identity test is the correct idiom
            date = ""
        url = "http://index.baidu.com/api/WordGraph/multi?wordlist[]=%s&datelist=%s" % (word, date)
        r = requests.get(url=url, headers=headers)
        data = json.loads(r.text)['data']
        wordlist = data['wordlist'][0]['wordGraph']
        # Original loop shadowed the `word` parameter; use a distinct name.
        res_list = [
            {
                "word": item['word'],
                "pv": item['pv'],
                "ratio": item['ratio'],
                "period": data['period'],
            }
            for item in wordlist
        ]
        return pd.DataFrame(res_list)
    except Exception:
        # Narrowed from a bare except; see baidu_interest_index.
        return None
def baidu_search_index(word, start_date, end_date, cookie, type="all"):
    """Baidu index: search-volume time series for a keyword.

    :param word: keyword to query
    :param start_date: period start, e.g. '2020-12-01'
    :param end_date: period end (inclusive)
    :param cookie: authenticated index.baidu.com cookie string
    :param type: series key inside userIndexes, e.g. "all" (the name shadows
        the ``type`` builtin but is kept for backward compatibility)
    :return: DataFrame indexed by date with one column named after ``word``,
        or None on any failure
    """
    try:
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
            "Cookie": cookie,
            "Host": "index.baidu.com",
            "Referer": "http://index.baidu.com/v2/main/index.html",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"
        }
        w = '{"name":"%s","wordType":1}' % word
        url = 'http://index.baidu.com/api/SearchApi/index?area=0&word=[[%s]]&startDate=%s&endDate=%s' % (w, start_date, end_date)
        r = requests.get(url=url, headers=headers)
        data = r.json()["data"]
        all_data = data["userIndexes"][0][type]["data"]
        uniqid = data["uniqid"]
        # The payload is obfuscated; fetch the per-request key and decode it.
        ptbk = get_ptbk(uniqid, cookie)
        result = decrypt(ptbk, all_data).split(",")
        # Empty fields mean "no data for that day"; treat them as zero.
        result = [int(item) if item != "" else 0 for item in result]
        temp_df = pd.DataFrame(
            [pd.date_range(start=start_date, end=end_date), result],
            index=["date", word],
        ).T
        temp_df.index = pd.to_datetime(temp_df["date"])
        del temp_df["date"]
        return temp_df
    except Exception:
        # Dropped the unused `as e` binding; any failure yields None.
        return None
def baidu_info_index(word, start_date, end_date, cookie):
    """Baidu index: news-feed (zixun) index time series for a keyword.

    :param word: keyword to query
    :param start_date: period start, e.g. '2020-12-01'
    :param end_date: period end (inclusive)
    :param cookie: authenticated index.baidu.com cookie string
    :return: DataFrame indexed by date with one column named after ``word``,
        or None on any failure
    """
    try:
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
            "Cookie": cookie,
            "Host": "index.baidu.com",
            "Referer": "http://index.baidu.com/v2/main/index.html",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36"
        }
        w = '{"name":"%s","wordType":1}' % word
        url = 'http://index.baidu.com/api/FeedSearchApi/getFeedIndex?area=0&word=[[%s]]&startDate=%s&endDate=%s' % (
            w, start_date, end_date)
        r = requests.get(url=url, headers=headers)
        data = r.json()["data"]
        all_data = data["index"][0]["data"]
        uniqid = data["uniqid"]
        # The payload is obfuscated; fetch the per-request key and decode it.
        ptbk = get_ptbk(uniqid, cookie)
        result = decrypt(ptbk, all_data).split(",")
        # Empty fields mean "no data for that day"; treat them as zero.
        result = [int(item) if item != "" else 0 for item in result]
        temp_df = pd.DataFrame(
            [pd.date_range(start=start_date, end=end_date), result],
            index=["date", word],
        ).T
        temp_df.index = pd.to_datetime(temp_df["date"])
        del temp_df["date"]
        return temp_df
    except Exception:
        # Narrowed from a bare except; any failure yields None.
        return None
def baidu_media_index(word, start_date, end_date, cookie):
    """Baidu index: media (news) index time series for a keyword.

    :param word: keyword to query
    :param start_date: period start, e.g. '2020-12-01'
    :param end_date: period end (inclusive)
    :param cookie: authenticated index.baidu.com cookie string
    :return: DataFrame indexed by date with one column named after ``word``,
        or None on any failure
    """
    try:
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
            "Cookie": cookie,
            "Host": "index.baidu.com",
            "Referer": "http://index.baidu.com/v2/main/index.html",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36"
        }
        w = '{"name":"%s","wordType":1}' % word
        url = 'http://index.baidu.com/api/NewsApi/getNewsIndex?area=0&word=[[%s]]&startDate=%s&endDate=%s' % (w, start_date, end_date)
        r = requests.get(url=url, headers=headers)
        data = r.json()["data"]
        all_data = data["index"][0]["data"]
        uniqid = data["uniqid"]
        # The payload is obfuscated; fetch the per-request key and decode it.
        ptbk = get_ptbk(uniqid, cookie)
        result = decrypt(ptbk, all_data).split(",")
        # Empty fields mean "no data for that day"; treat them as zero.
        result = [int(item) if item != "" else 0 for item in result]
        temp_df = pd.DataFrame(
            [pd.date_range(start=start_date, end=end_date), result],
            index=["date", word],
        ).T
        temp_df.index = pd.to_datetime(temp_df["date"])
        del temp_df["date"]
        return temp_df
    except Exception:
        # Narrowed from a bare except; any failure yields None.
        return None
# Manual smoke test: requires a real, valid index.baidu.com cookie to return data.
if __name__ == "__main__":
    # '*' is a placeholder cookie — replace with a real one before running.
    cookie = '*'
    data = baidu_search_index(word="口罩", start_date='2020-12-01', end_date='2020-12-24', cookie=cookie)
    print(data)
| 34.307479
| 149
| 0.542107
| 1,577
| 12,385
| 4.150919
| 0.143944
| 0.029331
| 0.031775
| 0.031164
| 0.818821
| 0.814085
| 0.806599
| 0.771769
| 0.771769
| 0.771158
| 0
| 0.036279
| 0.290028
| 12,385
| 360
| 150
| 34.402778
| 0.708177
| 0.068954
| 0
| 0.741176
| 0
| 0.047059
| 0.352316
| 0.015056
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035294
| false
| 0
| 0.015686
| 0
| 0.113725
| 0.003922
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5c0d9f52c23833f76ab14324994acd7bcd09237b
| 2,919
|
py
|
Python
|
pycounter/test/test_br1.py
|
pybrarian/pycounter
|
ea2784137d8908b90c7f81022ff8a313cec7635d
|
[
"MIT"
] | null | null | null |
pycounter/test/test_br1.py
|
pybrarian/pycounter
|
ea2784137d8908b90c7f81022ff8a313cec7635d
|
[
"MIT"
] | null | null | null |
pycounter/test/test_br1.py
|
pybrarian/pycounter
|
ea2784137d8908b90c7f81022ff8a313cec7635d
|
[
"MIT"
] | null | null | null |
"""Test parsing of COUNTER BR1 book report."""
from __future__ import absolute_import
import datetime
import os
import unittest
import pycounter.report
class ParseExample(unittest.TestCase):
    """Tests for parsing a COUNTER 3 BR1 book report fixture."""

    def setUp(self):
        # Parse the sample CSV fixture shipped alongside this test module.
        fixture_path = os.path.join(
            os.path.dirname(__file__), 'data/simpleBR1.csv')
        self.report = pycounter.report.parse(fixture_path)

    def test_reportname(self):
        """The report identifies itself as BR1, COUNTER version 1."""
        self.assertEqual(self.report.report_type, u'BR1')
        self.assertEqual(self.report.report_version, 1)

    def test_year(self):
        """The report year matches the fixture."""
        self.assertEqual(self.report.year, 2012)

    def test_platform(self):
        """Every publication carries the expected publisher and platform."""
        for pub in self.report:
            self.assertEqual(pub.publisher, u"Megadodo Publications")
            self.assertEqual(pub.platform, u"HHGTTG Online")

    def test_stats(self):
        """Monthly usage counts of the first publication match the fixture."""
        first_pub = self.report.pubs[0]
        monthly_counts = [row[2] for row in first_pub]
        self.assertEqual(monthly_counts, [0, 25, 0, 0, 0, 0])

    def test_customer(self):
        """The customer name is read from the report header."""
        self.assertEqual(self.report.customer,
                         u"University of Maximegalon")

    def test_date_run(self):
        """The run date is parsed into a datetime.date."""
        self.assertEqual(self.report.date_run, datetime.date(2012, 7, 9))

    def test_period(self):
        """The covered period is the first half of 2012."""
        expected_period = (datetime.date(2012, 1, 1),
                           datetime.date(2012, 6, 30))
        self.assertEqual(self.report.period, expected_period)
class ParseCounter4Example(unittest.TestCase):
    """Tests for parsing a COUNTER 4 BR1 book report fixture."""

    def setUp(self):
        # Parse the sample TSV fixture shipped alongside this test module.
        fixture_path = os.path.join(
            os.path.dirname(__file__), 'data/C4BR1.tsv')
        self.report = pycounter.report.parse(fixture_path)

    def test_reportname(self):
        """The report identifies itself as BR1, COUNTER version 4."""
        self.assertEqual(self.report.report_type, u'BR1')
        self.assertEqual(self.report.report_version, 4)

    def test_year(self):
        """The report year matches the fixture."""
        self.assertEqual(self.report.year, 2012)

    def test_platform(self):
        """Every publication carries the expected publisher and platform."""
        for pub in self.report:
            self.assertEqual(pub.publisher, u"Megadodo Publications")
            self.assertEqual(pub.platform, u"HHGTTG Online")

    def test_stats(self):
        """Monthly usage counts of the first publication match the fixture."""
        first_pub = self.report.pubs[0]
        monthly_counts = [row[2] for row in first_pub]
        self.assertEqual(monthly_counts, [0, 25, 0, 0, 0, 0])

    def test_metric(self):
        """COUNTER 4 reports expose the metric label."""
        self.assertEqual(self.report.metric, u"Book Title Requests")

    def test_customer(self):
        """The customer name is read from the report header."""
        self.assertEqual(self.report.customer,
                         u"University of Maximegalon")

    def test_date_run(self):
        """The run date is parsed into a datetime.date."""
        self.assertEqual(self.report.date_run, datetime.date(2012, 7, 9))

    def test_period(self):
        """The covered period is the first half of 2012."""
        expected_period = (datetime.date(2012, 1, 1),
                           datetime.date(2012, 6, 30))
        self.assertEqual(self.report.period, expected_period)

    def test_isbn(self):
        """The first publication's ISBN is read from the fixture."""
        first_pub = self.report.pubs[0]
        self.assertEqual(first_pub.isbn, u'9787490833809')
| 31.387097
| 77
| 0.617335
| 349
| 2,919
| 5.057307
| 0.212034
| 0.113314
| 0.139943
| 0.184136
| 0.853824
| 0.802266
| 0.802266
| 0.802266
| 0.776771
| 0.776771
| 0
| 0.042723
| 0.270298
| 2,919
| 92
| 78
| 31.728261
| 0.785915
| 0.030832
| 0
| 0.772727
| 0
| 0
| 0.066833
| 0
| 0
| 0
| 0
| 0
| 0.30303
| 1
| 0.272727
| false
| 0
| 0.075758
| 0
| 0.378788
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.