hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
2544523311cc08e244cfc6c3f7029491eb958c6b
65
py
Python
data/scrape/link_extractors/__init__.py
jamesrharwood/journal-guidelines
fe6c0a6d3c0443df6fc816b9503fad24459ddb4a
[ "MIT" ]
null
null
null
data/scrape/link_extractors/__init__.py
jamesrharwood/journal-guidelines
fe6c0a6d3c0443df6fc816b9503fad24459ddb4a
[ "MIT" ]
null
null
null
data/scrape/link_extractors/__init__.py
jamesrharwood/journal-guidelines
fe6c0a6d3c0443df6fc816b9503fad24459ddb4a
[ "MIT" ]
null
null
null
from .extractors import extract_links, extract_links_by_strategy
32.5
64
0.892308
9
65
6
0.777778
0.444444
0
0
0
0
0
0
0
0
0
0
0.076923
65
1
65
65
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c25f383b08dc16b275556e63301d0fafc05360a0
124
py
Python
strongr/core/exception/__init__.py
bigr-erasmusmc/StrongR
48573e170771a251f629f2d13dba7173f010a38c
[ "Apache-2.0" ]
null
null
null
strongr/core/exception/__init__.py
bigr-erasmusmc/StrongR
48573e170771a251f629f2d13dba7173f010a38c
[ "Apache-2.0" ]
null
null
null
strongr/core/exception/__init__.py
bigr-erasmusmc/StrongR
48573e170771a251f629f2d13dba7173f010a38c
[ "Apache-2.0" ]
null
null
null
from .isnotcallableexception import IsNotCallableException from .invalidparameterexception import InvalidParameterException
41.333333
64
0.919355
8
124
14.25
0.5
0
0
0
0
0
0
0
0
0
0
0
0.064516
124
2
65
62
0.982759
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c261396a1367bc9b5512a504dcafa2e8bcfd1807
102
py
Python
vnpy/app/algo_trading/__init__.py
Billy-Meng/vnpy_origin
b0b0868027d70b1ba5dac65aa1a6d5e4246a0900
[ "MIT" ]
1
2020-06-18T16:38:29.000Z
2020-06-18T16:38:29.000Z
vnpy/app/algo_trading/__init__.py
Billy-Meng/vnpy_origin
b0b0868027d70b1ba5dac65aa1a6d5e4246a0900
[ "MIT" ]
2
2020-06-22T12:12:43.000Z
2020-06-23T01:26:10.000Z
vnpy/app/algo_trading/__init__.py
Billy-Meng/vnpy_origin
b0b0868027d70b1ba5dac65aa1a6d5e4246a0900
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*- import sys import vnpy_algotrading sys.modules[__name__] = vnpy_algotrading
12.75
40
0.735294
13
102
5.307692
0.692308
0.434783
0
0
0
0
0
0
0
0
0
0.011364
0.137255
102
7
41
14.571429
0.772727
0.196078
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c2aad64a2aad2f61466e44cae8495b6cafe36146
5,168
py
Python
test/test_get_transaction_details_by_transaction_id_response_item_blockchain_specific.py
xan187/Crypto_APIs_2.0_SDK_Python
a56c75df54ef037b39be1315ed6e54de35bed55b
[ "MIT" ]
null
null
null
test/test_get_transaction_details_by_transaction_id_response_item_blockchain_specific.py
xan187/Crypto_APIs_2.0_SDK_Python
a56c75df54ef037b39be1315ed6e54de35bed55b
[ "MIT" ]
null
null
null
test/test_get_transaction_details_by_transaction_id_response_item_blockchain_specific.py
xan187/Crypto_APIs_2.0_SDK_Python
a56c75df54ef037b39be1315ed6e54de35bed55b
[ "MIT" ]
1
2021-07-21T03:35:18.000Z
2021-07-21T03:35:18.000Z
""" CryptoAPIs Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501 The version of the OpenAPI document: 2.0.0 Contact: developers@cryptoapis.io Generated by: https://openapi-generator.tech """ import sys import unittest import cryptoapis from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_bitcoin import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoin from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_bitcoin_cash import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoinCash from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_dash import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDash from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_dash_vin import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVin from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_dash_vout import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVout from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_dogecoin import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDogecoin from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_ethereum import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereum from 
cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_ethereum_classic import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassic from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_ethereum_classic_gas_price import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassicGasPrice from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_litecoin import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoin globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoin'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoin globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoinCash'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoinCash globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDash'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDash globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVin'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVin globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVout'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVout globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDogecoin'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDogecoin globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereum'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereum globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassic'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassic 
globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassicGasPrice'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassicGasPrice globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoin'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoin from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific class TestGetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific(unittest.TestCase): """GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific unit test stubs""" def setUp(self): pass def tearDown(self): pass def testGetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific(self): """Test GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific""" # FIXME: construct object with mandatory attributes with example values # model = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific() # noqa: E501 pass if __name__ == '__main__': unittest.main()
90.666667
484
0.909443
348
5,168
13.149425
0.33046
0.033654
0.045673
0.052885
0.218313
0.218313
0.218313
0.218313
0.218313
0.218313
0
0.003083
0.05863
5,168
56
485
92.285714
0.937513
0.180147
0
0.090909
0
0
0.18275
0.180843
0
1
0
0.017857
0
1
0.090909
false
0.090909
0.424242
0
0.545455
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
1
0
1
0
0
0
0
1
1
0
1
0
0
6
c2b5b21620b9700cb8ad7260e94e93fb05dfdd01
417
py
Python
templatedir/nice/objective.py
Kooper95/Shape-optimiser
caff58644bf64f5425fef5047688098d71b062b0
[ "MIT" ]
null
null
null
templatedir/nice/objective.py
Kooper95/Shape-optimiser
caff58644bf64f5425fef5047688098d71b062b0
[ "MIT" ]
null
null
null
templatedir/nice/objective.py
Kooper95/Shape-optimiser
caff58644bf64f5425fef5047688098d71b062b0
[ "MIT" ]
null
null
null
import sys #if len(sys.argv) == 1: # print(200000) #else: Powerout = (float(sys.argv[1]) - 298.15) * 3.14159265 * 0.1 * 0.1 * 0.22 / 0.005 #p = 611.21 * 2.718281828 ** ((18.678 - (float(sys.argv[1])-273.15)/234.5)*((float(sys.argv[1])-273.15)/(float(sys.argv[1])-16.01))) #Powerin = float(sys.argv[2]) * 2256600 * 18.01528 * p/(1000 * 8.31446261815324 * float(sys.argv[1])) print(Powerout + float(sys.argv[2]))
34.75
132
0.609113
76
417
3.342105
0.473684
0.220472
0.330709
0.255906
0.141732
0.141732
0
0
0
0
0
0.303867
0.131894
417
11
133
37.909091
0.39779
0.659472
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
665d4ee8e498a7f6eb6e8727b64c0dc527112f25
101
py
Python
src/articles/utils.py
robzzy/articles-service
a37b0f382ec5544c9f67236672e8325de8d8cf6b
[ "MIT" ]
null
null
null
src/articles/utils.py
robzzy/articles-service
a37b0f382ec5544c9f67236672e8325de8d8cf6b
[ "MIT" ]
null
null
null
src/articles/utils.py
robzzy/articles-service
a37b0f382ec5544c9f67236672e8325de8d8cf6b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from datetime import datetime def utcnow(): return datetime.utcnow()
11.222222
29
0.643564
12
101
5.416667
0.75
0
0
0
0
0
0
0
0
0
0
0.0125
0.207921
101
8
30
12.625
0.8
0.207921
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
1
0
0
0
6
666f9404de84023163423cae5f0e889d7d73b5cf
114
py
Python
tatc/tatc/__init__.py
code-lab-org/tatc
51ab32d69923e99637b8939bca6965ba218d6056
[ "BSD-3-Clause" ]
null
null
null
tatc/tatc/__init__.py
code-lab-org/tatc
51ab32d69923e99637b8939bca6965ba218d6056
[ "BSD-3-Clause" ]
null
null
null
tatc/tatc/__init__.py
code-lab-org/tatc
51ab32d69923e99637b8939bca6965ba218d6056
[ "BSD-3-Clause" ]
null
null
null
from . import analysis from . import generation from . import schemas from . import constants from . import utils
19
24
0.780702
15
114
5.933333
0.466667
0.561798
0
0
0
0
0
0
0
0
0
0
0.175439
114
5
25
22.8
0.946809
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6698840643e395fa623f15782641264872cbd6d1
77
py
Python
Code_Challenges/fizz_buzz.py
fuse999/Python_Sandbox
83d9c33a9c9e6e5cff40bbc6be525c9e604e9e41
[ "MIT" ]
null
null
null
Code_Challenges/fizz_buzz.py
fuse999/Python_Sandbox
83d9c33a9c9e6e5cff40bbc6be525c9e604e9e41
[ "MIT" ]
null
null
null
Code_Challenges/fizz_buzz.py
fuse999/Python_Sandbox
83d9c33a9c9e6e5cff40bbc6be525c9e604e9e41
[ "MIT" ]
null
null
null
def fizz_buzz(num): return "Fizz"*(num%3==0) + "Buzz"*(num%5==0) or str(num)
38.5
57
0.61039
16
77
2.875
0.625
0.304348
0
0
0
0
0
0
0
0
0
0.057971
0.103896
77
2
57
38.5
0.608696
0
0
0
0
0
0.102564
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
669f4cf088ed7bab4dba3af533966368a6931102
5,790
py
Python
tsfeatures/tsfeatures_r.py
vishalbelsare/tsfeatures-1
554581d795344a023d14cddcbbf52c491b2d6e14
[ "MIT" ]
57
2020-01-28T02:00:19.000Z
2021-08-20T19:19:51.000Z
tsfeatures/tsfeatures_r.py
vishalbelsare/tsfeatures-1
554581d795344a023d14cddcbbf52c491b2d6e14
[ "MIT" ]
9
2019-11-30T23:56:39.000Z
2021-09-01T17:27:13.000Z
tsfeatures/tsfeatures_r.py
vishalbelsare/tsfeatures-1
554581d795344a023d14cddcbbf52c491b2d6e14
[ "MIT" ]
18
2020-01-28T02:00:34.000Z
2021-07-29T19:57:22.000Z
#!/usr/bin/env python # coding: utf-8 from typing import List import pandas as pd import rpy2.robjects as robjects from rpy2.robjects import pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq: int, features: List[str] = ["length", "acf_features", "arch_stat", "crossing_points", "entropy", "flat_spots", "heterogeneity", "holt_parameters", "hurst", "hw_parameters", "lumpiness", "nonlinearity", "pacf_features", "stability", "stl_features", "unitroot_kpss", "unitroot_pp"], **kwargs) -> pd.DataFrame: """tsfeatures wrapper using r. Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'ds', 'y']. Long panel of time series. freq: int Frequency of the time series. features: List[str] String list of features to calculate. **kwargs: Arguments used by the original tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html """ rstring = """ function(df, freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt, unique_id) series_list <- split(dt, by = "unique_id", keep.by = FALSE) series_list <- lapply(series_list, function(serie) serie[, ts(y, frequency = freq)]) if("hw_parameters" %in% features){ features <- setdiff(features, "hw_parameters") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...)) names(hw_series_features) <- paste0("hw_", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...)) names(series_features) <- paste0("hw_", names(series_features)) } } else { series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id := names(series_list)] } """ pandas2ri.activate() rfunc = robjects.r(rstring) feats = 
rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] = ["length", "acf_features", "arch_stat", "crossing_points", "entropy", "flat_spots", "heterogeneity", "holt_parameters", "hurst", "hw_parameters", "lumpiness", "nonlinearity", "pacf_features", "stability", "stl_features", "unitroot_kpss", "unitroot_pp"], **kwargs) -> pd.DataFrame: """tsfeatures wrapper using r. Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'seasonality', 'y']. Wide panel of time series. features: List[str] String list of features to calculate. **kwargs: Arguments used by the original tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html """ rstring = """ function(uids, seasonalities, ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap( list(uids, seasonalities, ys), function(uid, seasonality, y) ts(y, frequency=seasonality) ) names(series_list) <- uids if("hw_parameters" %in% features){ features <- setdiff(features, "hw_parameters") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...)) names(hw_series_features) <- paste0("hw_", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...)) names(series_features) <- paste0("hw_", names(series_features)) } } else { series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id := names(series_list)] } """ pandas2ri.activate() rfunc = robjects.r(rstring) uids = 
ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys = ts['y'].to_list() feats = rfunc(uids, seasonalities, ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats
37.115385
105
0.54905
526
5,790
5.857414
0.222433
0.127231
0.041545
0.103862
0.790652
0.790652
0.790652
0.790652
0.743265
0.743265
0
0.003612
0.33057
5,790
155
106
37.354839
0.79128
0.143523
0
0.666667
0
0
0.675564
0.17546
0
0
0
0
0
1
0.021505
false
0
0.043011
0
0.086022
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
66a8f9b8cc1a5e38469c82b0fbb8d4d59fa5a00b
48
py
Python
ravel/ext/grpc/proto/__init__.py
gigaquads/pybiz
e9654592246be06a777934e889e03407c5c1673e
[ "MIT" ]
2
2021-02-26T15:30:44.000Z
2021-05-22T14:06:17.000Z
ravel/ext/grpc/proto/__init__.py
gigaquads/ravel
e9654592246be06a777934e889e03407c5c1673e
[ "MIT" ]
null
null
null
ravel/ext/grpc/proto/__init__.py
gigaquads/ravel
e9654592246be06a777934e889e03407c5c1673e
[ "MIT" ]
null
null
null
from .message_generator import MessageGenerator
24
47
0.895833
5
48
8.4
1
0
0
0
0
0
0
0
0
0
0
0
0.083333
48
1
48
48
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
66b770c881e21fad34af50dcae6abd39179c47c7
24
py
Python
IBRAHIM/OPENCV(gözdengeçir)/opencv13.py
vektorelpython24proje/temelbilgiler
bced2723d247dbb8b10cf86e25ee209635f82921
[ "MIT" ]
null
null
null
IBRAHIM/OPENCV(gözdengeçir)/opencv13.py
vektorelpython24proje/temelbilgiler
bced2723d247dbb8b10cf86e25ee209635f82921
[ "MIT" ]
null
null
null
IBRAHIM/OPENCV(gözdengeçir)/opencv13.py
vektorelpython24proje/temelbilgiler
bced2723d247dbb8b10cf86e25ee209635f82921
[ "MIT" ]
3
2020-10-24T14:36:14.000Z
2020-10-24T14:41:13.000Z
import cv2,numpy as np
8
22
0.75
5
24
3.6
1
0
0
0
0
0
0
0
0
0
0
0.052632
0.208333
24
2
23
12
0.894737
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dd2a5cf8f71832711ba0041d79936803628b02c3
88
py
Python
bot/utils/__init__.py
famaxth/Russian-Qiwi-Bot
d5b0f23516343205ca7bad15b2d2fae7b675f584
[ "MIT" ]
null
null
null
bot/utils/__init__.py
famaxth/Russian-Qiwi-Bot
d5b0f23516343205ca7bad15b2d2fae7b675f584
[ "MIT" ]
null
null
null
bot/utils/__init__.py
famaxth/Russian-Qiwi-Bot
d5b0f23516343205ca7bad15b2d2fae7b675f584
[ "MIT" ]
null
null
null
from . import db_api from . import misc from .notify_admins import on_startup_notify
22
45
0.795455
14
88
4.714286
0.642857
0.30303
0
0
0
0
0
0
0
0
0
0
0.170455
88
3
46
29.333333
0.90411
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dd3a98d9913755c248179ace7fd4339b164d8375
60
py
Python
reprlearn/data/__init__.py
cocoaaa/ReprLearn
58dc682aa62dbd59201ccc55b9b26480ff3d6773
[ "MIT" ]
null
null
null
reprlearn/data/__init__.py
cocoaaa/ReprLearn
58dc682aa62dbd59201ccc55b9b26480ff3d6773
[ "MIT" ]
null
null
null
reprlearn/data/__init__.py
cocoaaa/ReprLearn
58dc682aa62dbd59201ccc55b9b26480ff3d6773
[ "MIT" ]
null
null
null
def data_fn(): print("src.data.__init__.py") # data_fn()
20
33
0.65
10
60
3.3
0.7
0.363636
0
0
0
0
0
0
0
0
0
0
0.133333
60
3
34
20
0.634615
0.15
0
0
0
0
0.4
0
0
0
0
0
0
1
0.5
true
0
0
0
0.5
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
6
dd58ae1c08481fd8fa7bad513b2a0bc59315eb1c
6,721
py
Python
src/cogs/image.py
Alone-ankush/Credo
cb7789589e31c46c8e1d699590a2abe6e8fe8d07
[ "MIT" ]
null
null
null
src/cogs/image.py
Alone-ankush/Credo
cb7789589e31c46c8e1d699590a2abe6e8fe8d07
[ "MIT" ]
null
null
null
src/cogs/image.py
Alone-ankush/Credo
cb7789589e31c46c8e1d699590a2abe6e8fe8d07
[ "MIT" ]
1
2021-11-22T16:11:52.000Z
2021-11-22T16:11:52.000Z
import discord from discord.ext import commands import aiohttp import requests class Image(commands.Cog, name='Image'): def __init__(self, bot): self.bot = bot @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def cat(self, ctx): """Gives You Random Image Of Cat""" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://aws.random.cat/meow') as r: data = await r.json() em = discord.Embed( title='Cat', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def dog(self, ctx): """Gives You Random Image Of Dog""" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://random.dog/woof.json') as r: data = await r.json() em = discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def fox(self, ctx): """Gives You Random Image Of Fox""" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/fox') as r: data = await r.json() em = discord.Embed( title='Fox', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def panda(self, ctx): """Gives You Random Image Of Panda""" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/panda') as r: data = 
await r.json() em = discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def red_panda(self, ctx): """Gives You Random Image Of Red Panda""" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/red_panda') as r: data = await r.json() em = discord.Embed( title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def bird(self, ctx): """Gives You Random Image Of Bird""" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/birb') as r: data = await r.json() em = discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def kola(self, ctx): """Gives You Random Image Of Kola""" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/koala') as r: data = await r.json() em = discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def pikachu(self, ctx): """Gives You Random Image Or GIF Of 
Pikachu""" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/pikachu') as r: data = await r.json() em = discord.Embed( title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}") await ctx.send(embed=em) # @commands.command() # @commands.cooldown(1, 10, commands.BucketType.user) # async def yt(self,ctx,comment:str): # """Comments On Youtube""" # url = f"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}" # em = discord.Embed(color = ctx.author.color) # em.set_image(url=url) # em.set_footer(text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar_url) # await ctx.send(embed=em) def setup(bot): bot.add_cog(Image(bot))
44.217105
156
0.545752
820
6,721
4.408537
0.112195
0.059751
0.041494
0.049793
0.870539
0.854772
0.833748
0.795021
0.776763
0.776763
0
0.006
0.330457
6,721
151
157
44.509934
0.797333
0.070972
0
0.690265
0
0
0.09877
0
0
0
0
0
0
1
0.017699
false
0
0.035398
0
0.061947
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
dd645fa056ebfd23c4caebdfdc2152ec117fdf48
214
py
Python
programmers/blind_phone_number.py
schio/algorithm_test
c240faca428a9adb2970591338d4792b2f4fb7f3
[ "MIT" ]
null
null
null
programmers/blind_phone_number.py
schio/algorithm_test
c240faca428a9adb2970591338d4792b2f4fb7f3
[ "MIT" ]
null
null
null
programmers/blind_phone_number.py
schio/algorithm_test
c240faca428a9adb2970591338d4792b2f4fb7f3
[ "MIT" ]
null
null
null
# https://programmers.co.kr/learn/courses/30/lessons/12948 def solution(phone_number): phone_number = list(phone_number) phone_number[:-4] = ["*"] * (len(phone_number) - 4) return "".join(phone_number)
35.666667
58
0.691589
29
214
4.896552
0.62069
0.464789
0.225352
0.309859
0
0
0
0
0
0
0
0.048387
0.130841
214
5
59
42.8
0.715054
0.261682
0
0
0
0
0.00641
0
0
0
0
0
0
1
0.25
false
0
0
0
0.5
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
dd7a97cab10721a7ac3a4595ec291fc2eb2c99df
27
py
Python
models/det/__init__.py
BruceHan98/OCHTPS
5bee02bcbff36029cd47b4802178216f980a4298
[ "MIT" ]
null
null
null
models/det/__init__.py
BruceHan98/OCHTPS
5bee02bcbff36029cd47b4802178216f980a4298
[ "MIT" ]
null
null
null
models/det/__init__.py
BruceHan98/OCHTPS
5bee02bcbff36029cd47b4802178216f980a4298
[ "MIT" ]
null
null
null
from .pannet import PANNet
13.5
26
0.814815
4
27
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.148148
27
1
27
27
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dd959355e365d9613862ee93daf2b03fcc0afbb8
32
py
Python
main.py
Abhishek-P/py-hello-world-run-from-colab
efd9539f49dfd324e1e475321e2c7c5ecb70e3ac
[ "MIT" ]
null
null
null
main.py
Abhishek-P/py-hello-world-run-from-colab
efd9539f49dfd324e1e475321e2c7c5ecb70e3ac
[ "MIT" ]
null
null
null
main.py
Abhishek-P/py-hello-world-run-from-colab
efd9539f49dfd324e1e475321e2c7c5ecb70e3ac
[ "MIT" ]
null
null
null
print("Hello World! from Colab")
32
32
0.75
5
32
4.8
1
0
0
0
0
0
0
0
0
0
0
0
0.09375
32
1
32
32
0.827586
0
0
0
0
0
0.69697
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
b09e86d17222e2c84aa7c7682a198b25873a6309
28
py
Python
flask_web/flask_app/deep_learning/machine_learning/ml_utils.py
Yakings/system_demo
6ec9596db1e60e221054282a06d9129246e88f54
[ "Apache-2.0" ]
7
2021-09-02T06:47:35.000Z
2022-03-09T05:13:00.000Z
data/shapenet.py
pkudba/SCL
78e85344a579075d3d07ed77eab8e13144321c6a
[ "MIT" ]
null
null
null
data/shapenet.py
pkudba/SCL
78e85344a579075d3d07ed77eab8e13144321c6a
[ "MIT" ]
1
2020-08-18T10:55:10.000Z
2020-08-18T10:55:10.000Z
import os import numpy as np
14
18
0.821429
6
28
3.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.178571
28
2
18
14
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b09f6e59e32b5245279365673bc84979d193225e
177
py
Python
experiments/series_1/experiment_2/experiment_setup.py
TomaszOdrzygozdz/gym-splendor
aeb00605e105628188143a4bbd6280e9eb41c4f9
[ "MIT" ]
1
2020-03-09T18:56:01.000Z
2020-03-09T18:56:01.000Z
experiments/series_1/experiment_2/experiment_setup.py
TomaszOdrzygozdz/gym-splendor
aeb00605e105628188143a4bbd6280e9eb41c4f9
[ "MIT" ]
null
null
null
experiments/series_1/experiment_2/experiment_setup.py
TomaszOdrzygozdz/gym-splendor
aeb00605e105628188143a4bbd6280e9eb41c4f9
[ "MIT" ]
1
2019-10-25T13:09:40.000Z
2019-10-25T13:09:40.000Z
TRAIN_DIR = '/net/archive/groups/plggluna/plgtodrzygozdz/lvl1/train_epochs_new_eval' VALID_FILE = '/net/archive/groups/plggluna/plgtodrzygozdz/lvl1/valid_new/valid_eval.pickle'
59
91
0.841808
25
177
5.68
0.56
0.140845
0.225352
0.338028
0.591549
0.591549
0
0
0
0
0
0.011696
0.033898
177
2
92
88.5
0.818713
0
0
0
0
0
0.824859
0.824859
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
9ff0b81e53133aba91462aecf89de89bda59ec1a
129
py
Python
file_converters/ifcjson/__init__.py
IFCJSON-Team/IFC2JSON_python
20452c1c5d4461e6dc462c0c3855f3a213197279
[ "MIT" ]
15
2020-05-28T16:12:08.000Z
2022-02-17T15:12:19.000Z
file_converters/ifcjson/__init__.py
claudioperez/IFC2JSON_python
20452c1c5d4461e6dc462c0c3855f3a213197279
[ "MIT" ]
2
2020-08-03T07:06:21.000Z
2020-10-03T12:29:33.000Z
file_converters/ifcjson/__init__.py
claudioperez/IFC2JSON_python
20452c1c5d4461e6dc462c0c3855f3a213197279
[ "MIT" ]
8
2020-09-03T06:44:34.000Z
2021-05-19T06:11:05.000Z
from ifcjson.ifc2json4 import IFC2JSON4 from ifcjson.ifc2json5a import IFC2JSON5a # from ifcjson.to_ifcopenshell import JSON2IFC
32.25
46
0.868217
16
129
6.9375
0.5
0.297297
0
0
0
0
0
0
0
0
0
0.077586
0.100775
129
3
47
43
0.87931
0.341085
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9ff12bd165478b168116c20184aa85f7204d868f
424
py
Python
src/07_mongoengine/service_central/nosql/mongo_setup.py
jabelk/mongodb-for-python-developers
36df20d18b6e74fca986d6b01a58f32e983efdbf
[ "MIT" ]
null
null
null
src/07_mongoengine/service_central/nosql/mongo_setup.py
jabelk/mongodb-for-python-developers
36df20d18b6e74fca986d6b01a58f32e983efdbf
[ "MIT" ]
null
null
null
src/07_mongoengine/service_central/nosql/mongo_setup.py
jabelk/mongodb-for-python-developers
36df20d18b6e74fca986d6b01a58f32e983efdbf
[ "MIT" ]
null
null
null
import mongoengine def global_init(): # this is where would pass in creds and port and such # name= is the database db name # when we define our classes we will refer to the "core" connection # default localhost and port mongoengine.register_connection(alias='core', name='demo_dealership') # could have multiple like # mongoengine.register_connection(alias='analytics', name='anotherDBname')
32.615385
78
0.731132
57
424
5.368421
0.719298
0.045752
0.189542
0.222222
0
0
0
0
0
0
0
0
0.193396
424
12
79
35.333333
0.894737
0.641509
0
0
0
0
0.131034
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
0.666667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
c680f37234049d05b70d11aac85295f6c4622c68
350
py
Python
clusterwrapper/clustermetrics.py
opennlp/DeepPhrase
54bd6ca96c12475e3c3ff3745a4eb7c245b6e870
[ "MIT" ]
2
2019-06-19T12:52:31.000Z
2020-05-20T15:29:56.000Z
clusterwrapper/clustermetrics.py
opennlp/DeepPhrase
54bd6ca96c12475e3c3ff3745a4eb7c245b6e870
[ "MIT" ]
5
2019-12-17T05:44:10.000Z
2022-02-10T00:29:31.000Z
clusterwrapper/clustermetrics.py
opennlp/DeepPhrase
54bd6ca96c12475e3c3ff3745a4eb7c245b6e870
[ "MIT" ]
3
2019-10-06T13:31:31.000Z
2022-03-16T16:13:09.000Z
from sklearn.metrics import silhouette_score, calinski_harabaz_score def get_silhouette_coefficient(cluster_train_data,labels_assigned): return silhouette_score(cluster_train_data,labels_assigned) def get_calinski_harabaz_coefficient(cluster_train_data, labels_assigned): return calinski_harabaz_score(cluster_train_data, labels_assigned)
38.888889
74
0.88
45
350
6.333333
0.377778
0.168421
0.224561
0.308772
0.575439
0.575439
0.329825
0
0
0
0
0
0.074286
350
9
75
38.888889
0.87963
0
0
0
0
0
0
0
0
0
0
0
0
1
0.4
false
0
0.2
0.4
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
c6d555d859bb876aa1349e06152d5eef3dc029ae
117
py
Python
0x02-python-import_modules/5-variable_load.py
darkares23/holbertonschool-higher_level_programming
931b1b701d8a1d990b7cd931486496c0b5502e21
[ "MIT" ]
null
null
null
0x02-python-import_modules/5-variable_load.py
darkares23/holbertonschool-higher_level_programming
931b1b701d8a1d990b7cd931486496c0b5502e21
[ "MIT" ]
null
null
null
0x02-python-import_modules/5-variable_load.py
darkares23/holbertonschool-higher_level_programming
931b1b701d8a1d990b7cd931486496c0b5502e21
[ "MIT" ]
null
null
null
#!/usr/bin/python3 if __name__ == "__main__": import variable_load_5 print("{:d}".format(variable_load_5.a))
23.4
43
0.683761
17
117
4
0.823529
0.352941
0.382353
0
0
0
0
0
0
0
0
0.029703
0.136752
117
4
44
29.25
0.643564
0.145299
0
0
0
0
0.121212
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0.333333
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
c6d683a4278c405775be1daf2baed45d4e050d96
126
py
Python
7kyu/jaden_casing_strings.py
nhsz/codewars
82703959e910254d6feff4162f78c6dbd7a1c3ed
[ "MIT" ]
1
2018-12-02T23:04:38.000Z
2018-12-02T23:04:38.000Z
7kyu/jaden_casing_strings.py
nhsz/codewars
82703959e910254d6feff4162f78c6dbd7a1c3ed
[ "MIT" ]
null
null
null
7kyu/jaden_casing_strings.py
nhsz/codewars
82703959e910254d6feff4162f78c6dbd7a1c3ed
[ "MIT" ]
null
null
null
# http://www.codewars.com/kata/5390bac347d09b7da40006f6/ import string def to_jaden_case(s): return string.capwords(s)
15.75
56
0.761905
17
126
5.529412
0.882353
0
0
0
0
0
0
0
0
0
0
0.144144
0.119048
126
7
57
18
0.702703
0.428571
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
af5762516a430f1426529d0c6968ccfdddc2c3a9
26
py
Python
__init__.py
pslustig/galfitwrap
f8f695083e3b10806aeb6fb0f748234bd840a0d2
[ "MIT" ]
null
null
null
__init__.py
pslustig/galfitwrap
f8f695083e3b10806aeb6fb0f748234bd840a0d2
[ "MIT" ]
null
null
null
__init__.py
pslustig/galfitwrap
f8f695083e3b10806aeb6fb0f748234bd840a0d2
[ "MIT" ]
null
null
null
from .galaxywrap import *
13
25
0.769231
3
26
6.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.153846
26
1
26
26
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
afd143bd5e84c3a69dfc14385c7dd489f2e9fbb7
51,669
py
Python
Bin/init.py
mfneirae/GrupLAC-Complete
f4ccefe2553b90015d28df0e8d7730b4bad37d84
[ "MIT" ]
null
null
null
Bin/init.py
mfneirae/GrupLAC-Complete
f4ccefe2553b90015d28df0e8d7730b4bad37d84
[ "MIT" ]
null
null
null
Bin/init.py
mfneirae/GrupLAC-Complete
f4ccefe2553b90015d28df0e8d7730b4bad37d84
[ "MIT" ]
1
2021-06-10T09:21:18.000Z
2021-06-10T09:21:18.000Z
# # # ############################################################################# # Copyright (c) 2018 Universidad Nacional de Colombia All Rights Reserved. # # This work was made as a development to improve data collection # for self-assessment and accreditation processes in the Vicedeanship # of academic affairs in the Engineering Faculty of the Universidad # Nacional de Colombia and is licensed under a Creative Commons # Attribution-NonCommercial - ShareAlike 4.0 International License # and MIT Licence. # # by Manuel Embus. # # For more information write me to jai@mfneirae.com # Or visit my webpage at https://mfneirae.com/ # ############################################################################# # # def inicio(): global GP_DATOS_BASE global GP_DATOS_BASE_CSV global GP_DATOS_INSTITUCIONES global GP_DATOS_INSTITUCIONES_CSV global GP_DATOS_LINEAS global GP_DATOS_LINEAS_CSV global GP_DATOS_SECTORES global GP_DATOS_SECTORES_CSV global GP_DATOS_INTEGRANTES global GP_DATOS_INTEGRANTES_CSV global REL_GRUPO_PRODUCTO global REL_GRUPO_PRODUCTO_CSV global GP_PROD_BIB global GP_PROD_BIB_CSV global GP_PROD_TEC global GP_PROD_TEC_CSV global GP_APROPIACION global GP_APROPIACION_CSV global GP_OBRAS global GP_OBRAS_CSV global GP_ACTIVIDADES global GP_ACTIVIDADES_CSV global v_colciencias_tipo_producto global inv_colciencias_tipo_producto GP_DATOS_BASE = [] GP_DATOS_INSTITUCIONES = [] GP_DATOS_LINEAS = [] GP_DATOS_SECTORES = [] GP_DATOS_INTEGRANTES = [] REL_GRUPO_PRODUCTO = [] GP_PROD_BIB = [] GP_PROD_TEC = [] GP_APROPIACION = [] GP_OBRAS = [] GP_ACTIVIDADES = [] GP_PROD_BIB_CSV=["CODGP_PROD_BIB; \ CODGP_PROD:\ Revista; \ Autor Original; \ Nombre Libro; \ ISBN/ISSN; \ Medio de Divulgación; \ URL; \ Fasciculos; \ Idioma Original; \ Idioma Traduccion; \ Edición; \ Serie; \ Página Inicial; \ Página Final ; \ \n"] GP_PROD_TEC_CSV=["CODGP_PROD_TEC; \ CODGP_PROD; \ Tema; \ Nombre Comerial; \ Nombre Proyecto; \ Tipo de Ciclo; \ NIT; \ Fecha de Registro; \ Tiene Productos; \ 
Disponibilidad; \ Objeto; \ Fecha Publicación; \ Número de Contrato; \ Acto Administrativo; \ \n"] GP_APROPIACION_CSV=["CODGP_PROD_APROPIACION; \ CODGP_PROD; \ Tipos de Participación; \ Fecha Inicio; \ Fecha Fin; \ Proyecto de Inv; \ Medio de publicación; \ Emisora; \ Número de Participantes; \ \n"] GP_OBRAS_CSV=["CODGP_PROD_OBRAS; \ CODGP_PROD; \ Fecha Creación; \ Disiplina de origen; \ Institución Licencia; \ Fecha Licencia; \ Distinciones; \ Selección Distinción; \ Productos Asociados; \ Número Derechos Autor/NIT; \ \n"] GP_ACTIVIDADES_CSV=["CODGP_PROD_FORM; \ CODGP_PROD; \ Nombre de Ferias; \ Fecha Inicio Curso; \ Tipo Orientación; \ Nombre Estudiante; \ Programa Académico; \ Valoración; \ Fecha fin Curso; \ Finalidad; \ Duración; \ \n"] REL_GRUPO_PRODUCTO_CSV =["CODGP_PROD; \ CODGP; \ GP_TIPO_PROD; \ Nombre Producto; \ Lugar; \ Año; \ Idioma; \ Páginas; \ Volumen; \ Editorial; \ Ambito; \ DOI; \ Descripción; \ Instituciones; \ Tipo Vincula Institu; \ Autores\n"] GP_DATOS_BASE_CSV = ["CODGP;\ Año Formación;\ Mes Formación;\ Lugar;\ Nombre Lider;\ Información Certificada;\ Página Web;\ Correo;\ Clasificación;\ Área del Conocimiento;\ Programa Nacional;\ Programa Nacional 2;\ Plan de trabajo;\ Estado del Arte;\ Objetivos;\ Retos;\ Visión\n"] GP_DATOS_INSTITUCIONES_CSV = ["CODGP_INSTI;\ CODGP;\ Nombre Institución\n"] GP_DATOS_LINEAS_CSV = ["CODGP_LINEA;\ CODGP;\ Línea de Investigación\n"] GP_DATOS_SECTORES_CSV = ["CODGP_SECTOR;\ CODGP;\ Sector\n"] GP_DATOS_INTEGRANTES_CSV = ["CODGP_INTEGRANTE;\ CODGP;\ COD_RG;\ CVLAC;\ NOMBRE COMPLETO;\ Tipo Vinculación;\ Horas de Dedicación;\ Duración Vinculación;\ Inicio Vinculación;\ Fin Vinculación;\ Fin Vinculación\n"] v_colciencias_tipo_producto = [ "COD_TIPO_PRODUCTO; \ TIPO_PRODUCTO_COL; \ SUB_TIPO_PRODUCTO_COL; \ TIPO_UAPA\n\ 0; \ Evento sin producto asociado; \ Evento sin producto asociado; \ Evento sin producto asociado\n\ 1; \ Redes de conocimiento; \ Redes de conocimiento; \ Redes de conocimiento\n\ 2; \ Producción 
bibliográfica - Trabajos en eventos (Capítulos de memoria) - Completo; \ Capítulos de memoria; \ Capítulos de memoria\n\ 3; \ Producción técnica - Presentación de trabajo - Comunicación; \ Presentación de trabajo; \ Trabajo de Comunicación\n\ 4; \ Demás trabajos - Demás trabajos - Póster; \ Demás trabajos; \ Poster\n\ 5; \ Producción técnica - Presentación de trabajo - Conferencia; \ Presentación de trabajo; \ Conferencia\n\ 6; \ Producción técnica - Presentación de trabajo - Ponencia; \ Presentación de trabajo; \ Ponencia\n\ 7; \ Estrategias pedagógicas para el fomento a la CTI; \ Estrategias pedagógicas; \ Estrategias pedagógicas\n\ 8; \ Producción bibliográfica - Artículo - Publicado en revista especializada; \ Publicado en revista especializada; \ Artículo\n\ 9; \ Producción bibliográfica - Artículo - Corto (Resumen); \ Corto (Resumen); \ Artículo\n\ 10; \ Estrategias pedagógicas para el fomento a la CTI; \ Estrategias pedagógicas; \ Estrategias pedagógicas\n\ 11; \ Producción bibliográfica - Artículo - Caso clínico; \ Caso Clínico; \ Artículo\n\ 12; \ Producción bibliográfica - Trabajos en eventos (Capítulos de memoria) - Resumen; \ Capítulo de Memoria; \ Resumen\n\ 13; \ Producción técnica - Presentación de trabajo - Congreso; \ Congreso; \ Congreso\n\ 14; \ Producción técnica - Presentación de trabajo - Simposio; \ Simposio; \ Simposio\n\ 15; \ Producción técnica - Presentación de trabajo - Seminario; \ Seminario; \ Seminario\n\ 16; \ Producción técnica - Presentación de trabajo - Otro; \ Otro; \ Otro\n\ 17; \ Producción bibliográfica - Libro - Libro resultado de investigación; \ Libro resultado de investigación; \ Libro\n\ 18; \ Producción bibliográfica - Libro - Otro libro publicado; \ Otro libro publicado; \ Libro - Otro\n\ 19; \ Producción bibliográfica - Libro - Libro pedagógico y/o de divulgación; \ Libro pedagógico y/o de divulgación; \ Libro - pedagógico\n\ 20; \ Otro capítulo de libro publicado; \ Otro capítulo de libro; \ Capítulo de libro - 
Otro\n\ 21; \ Capítulo de libro; \ Capítulo de libro; \ Capítulo de libro\n\ 22; \ Producción bibliográfica - Otro artículo publicado - Periódico de noticias; \ Periódico de noticias; \ Otro\n\ 23; \ Producción bibliográfica - Otro artículo publicado - Revista de divulgación; \ Revista de divulgación; \ Otro\n\ 24; \ Producción bibliográfica - Otro artículo publicado - Cartas al editor; \ Cartas al editor; \ Otro\n\ 25; \ Producción bibliográfica - Otro artículo publicado - Reseñas de libros; \ Reseñas de libros; \ Otro\n\ 26; \ Producción bibliográfica - Otro artículo publicado - Columna de opinión; \ Columnas de opinión; \ Otro\n\ 27; \ Producción bibliográfica - Documento de trabajo (Working Paper); \ Documento de trabajo (Working Paper); \ Otro\n\ 28; \ Producción bibliográfica - Traducciones - Artículo; \ Traducciones - Artículo; \ Traducciones\n\ 29; \ Producción bibliográfica - Traducciones - Libro; \ Traducciones - Libro; \ Traducciones\n\ 30; \ Producción bibliográfica - Traducciones - Otra; \ Traducciones - Otra; \ Traducciones\n\ 31; \ Producción bibliográfica - Otra producción bibliográfica - Introducción; \ Introducción; \ Otro\n\ 32; \ Producción bibliográfica - Otra producción bibliográfica - Prólogo; \ Prólogo; \ Otro\n\ 33; \ Producción bibliográfica - Otra producción bibliográfica - Epílogo; \ Epílogo; \ Otro\n\ 34; \ Producción bibliográfica - Otra producción bibliográfica - Otra; \ Otra; \ Otro\n\ 35; \ Producción técnica - Softwares - Computacional; \ Software; \ Software\n\ 36; \ Producción técnica - Productos tecnológicos - Gen Clonado; \ Productos tecnológicos - Gen Clonado; \ Productos tecnológicos\n\ 37; \ Producción técnica - Productos tecnológicos - Coleccion biologica de referencia con informacion sistematizada; \ Productos tecnológicos - Coleccion biologica de referencia con informacion sistematizada; \ Productos tecnológicos\n\ 38; \ Producción técnica - Productos tecnológicos - Otro; \ Productos tecnológicos - Otro; \ Productos 
tecnológicos\n\ 39; \ Producción técnica - Productos tecnológicos - Base de datos de referencia para investigación; \ Productos tecnológicos - Base de datos de referencia para investigación; \ Productos tecnológicos\n\ 40; \ Producción técnica - Diseño Industrial; \ Diseño Industrial; \ Otro\n\ 41; \ Producción técnica - Esquema de circuito integrado; \ Esquema de circuito integrado; \ Otro\n\ 42; \ Producción técnica - Innovaciones generadas de producción empresarial - Organizacional; \ Innovaciones generadas de producción empresarial - Organizacional; \ Innovaciones\n\ 43; \ Producción técnica - Innovaciones generadas de producción empresarial - Empresarial; \ Innovaciones generadas de producción empresarial - Empresarial; \ Innovaciones\n\ 44; \ Producción técnica - Variedad animal; \ Variedad animal; \ Otro\n\ 45; \ Producción técnica - Innovación de proceso o procedimiento; \ Innovación de proceso o procedimiento; \ Innovación\n\ 46; \ Producción técnica - Cartas, mapas o similares - Aerofotograma; \ Aerofotograma; \ Otro\n\ 47; \ Producción técnica - Cartas, mapas o similares - Carta; \ Carta; \ Otro\n\ 48; \ Producción técnica - Cartas, mapas o similares - Fotograma; \ Fotograma; \ Otro\n\ 49; \ Producción técnica - Cartas, mapas o similares - Mapa; \ Mapa; \ Otro\n\ 50; \ Producción técnica - Cartas, mapas o similares - Otra; \ Otra; \ Otro\n\ 51; \ Producción técnica - Variedad vegetal; \ Variedad vegetal; \ Otro\n\ 52; \ Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Servicios de proyectos de IDI; \ Servicios de proyectos de IDI; \ Otro\n\ 53; \ Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Comercialización de tecnología; \ Comercialización de tecnología; \ Otro\n\ 54; \ Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Análisis de competitividad; \ Análisis de competitividad; \ Otro\n\ 55; \ Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - 
Informe técnico; \ Informe técnico; \ Otro\n\ 56; \ Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Otro; \ Otro; \ Otro\n\ 57; \ Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Acciones de transferencia tecnológica; \ Acciones de transferencia tecnológica; \ Otro\n\ 58; \ Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Desarrollo de productos; \ Desarrollo de productos; \ Otro\n\ 59; \ Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Implementación de sistemas de análisis; \ Implementación de sistemas de análisis; \ Otro\n\ 60; \ Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Consultoría en artes,arquitectura y diseño; \ Consultoría en artes,arquitectura y diseño; \ Otro\n\ 61; \ Producción técnica - Regulación, norma, reglamento o legislación - Ambiental o de Salud; \ Regulación, norma, reglamento o legislación - Ambiental o de Salud; \ Otro\n\ 62; \ Producción técnica - Regulación, norma, reglamento o legislación - Educativa; \ Regulación, norma, reglamento o legislación - Educativa; \ Otro\n\ 63; \ Producción técnica - Regulación, norma, reglamento o legislación - Social; \ Regulación, norma, reglamento o legislación - Social; \ Otro\n\ 64; \ Producción técnica - Regulación, norma, reglamento o legislación - Técnica; \ Regulación, norma, reglamento o legislación - Técnica; \ Otro\n\ 65; \ Producción técnica - Regulación, norma, reglamento o legislación - Guía de práctica clínica; \ Regulación, norma, reglamento o legislación - Guía de práctica clínica; \ Otro\n\ 66; \ Producción técnica - Regulación, norma, reglamento o legislación - Proyecto de ley; \ Regulación, norma, reglamento o legislación - Proyecto de ley; \ Otro\n\ 67; \ Producción técnica - Reglamento Técnico; \ Reglamento Técnico; \ Otro\n\ 68; \ Producción técnica - Empresa de base tecnológica - Spin-off; \ Empresa de base tecnológica - Spin-off; \ Otro\n\ 69; \ 
Producción técnica - Empresa de base tecnológica - Start-up; \ Empresa de base tecnológica - Start-up; \ Otro\n\ 70; \ Demás trabajos - Demás trabajos; \ Demás trabajos; \ Otro\n\ 71; \ Producción técnica - Signos; \ Signos; \ Otro\n\ 72; \ Producción técnica - Softwares - Multimedia; \ Multimedia; \ Software\n\ 73; \ Producción técnica - Softwares - Otra; \ Softwares - Otra; \ Software\n\ 74; \ Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Básica; \ Técnica - Básica; \ Otro\n\ 75; \ Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Ensayo; \ Técnica - Ensayo; \ Otro\n\ 76; \ Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Servicios de Proyectos de I+D+I; \ Servicios de Proyectos de I+D+I; \ Otro\n\ 77; \ Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Proceso; \ Técnica - Proceso; \ Otro\n\ 78; \ Datos complementarios - Participación en comités de evaluación - Profesor titular; \ Participación en comités de evaluación - Profesor titular; \ Comités\n\ 79; \ Datos complementarios - Participación en comités de evaluación - Concurso docente; \ Participación en comités de evaluación - Concurso docente; \ Comités\n\ 80; \ Datos complementarios - Participación en comités de evaluación - Jefe de cátedra; \ Participación en comités de evaluación - Jefe de cátedra; \ Comités\n\ 81; \ Datos complementarios - Participación en comités de evaluación - Evaluación de cursos; \ Participación en comités de evaluación - Evaluación de cursos; \ Comités\n\ 82; \ Datos complementarios - Participación en comités de evaluación - Acreditación de programas; \ Participación en comités de evaluación - Acreditación de programas; \ Comités\n\ 83; \ Datos complementarios - Participación en comités de evaluación - Asignación de becas; \ Participación en comités de evaluación - Asignación de becas; \ Comités\n\ 84; \ Datos complementarios - Participación en comités de evaluación - 
Otra; \ Participación en comités de evaluación - Otra; \ Comités\n\ 85; \ Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Pregrado; \ Jurado Pregrado; \ Comités\n\ 86; \ Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Especialización; \ Jurado Especialización; \ Comités\n\ 87; \ Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Especialidad Médica; \ Jurado Especialidad Médica; \ Comités\n\ 88; \ Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Maestría; \ Jurado Maestría; \ Comités\n\ 89; \ Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Doctorado; \ Jurado Doctorado; \ Comités\n\ 90; \ Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Otra; \ Jurado Otra; \ Comités\n\ 91; \ Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Curso de perfeccionamiento/especialización; \ Jurado Especializaciones; \ Comités\n\ 96; \ Producción técnica - Signos Distintivos - Nombres comerciales; \ Signos Distintivos - Nombres comerciales; \ Nombres comerciales\n\ 92; \ Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Curso de perfeccionamiento/especialización; \ Jurado Especializaciones; \ Comités\n\ 93; \ Producción técnica - Plantas piloto - Planta piloto; \ Plantas piloto - Planta piloto; \ Planta piloto\n\ 94; \ Producción técnica - Prototipo - Industrial; \ Prototipo - Industrial; \ Industrial\n\ 95; \ Producción técnica - Signos Distintivos - Marcas; \ Signos Distintivos - Marcas; \ Marcas\n\ 96; \ Producción técnica - Signos Distintivos - Nombres comerciales; \ Signos Distintivos - Nombres comerciales; \ Nombres comerciales\n\ 97; \ Apropiación social y circularción del conocimiento - Ediciones - Anales; \ Ediciones - Anales; \ Analess\n\ 98; \ Apropiación social y circularción del conocimiento - Ediciones - Libro; \ Ediciones - Libro; \ Libro\n\ 92; \ Producción 
técnica - Prototipo - Servicios; \ Prototipo - Servicios; \ Servicios\n"] #*************************************************************************** #Insert #*************************************************************************** inv_colciencias_tipo_producto = [ "REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `cod_tipo_producto`,\ `tipo_producto_col`,\ `sub_tipo_producto_col`,\ `tipo_uapa`) VALUES (\ 0,\ 'Evento sin producto asociado',\ 'Evento sin producto asociado',\ 'Evento sin producto asociado');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 1,\ 'Redes de conocimiento',\ 'Redes de conocimiento',\ 'Redes de conocimiento');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 2,\ 'Producción bibliográfica - Trabajos en eventos (Capítulos de memoria) - Completo',\ 'Capítulos de memoria',\ 'Capítulos de memoria');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 3,\ 'Producción técnica - Presentación de trabajo - Comunicación',\ 'Presentación de trabajo',\ 'Trabajo de Comunicación');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 4,\ 'Demás trabajos - Demás trabajos - Póster',\ 'Demás trabajos',\ 'Poster');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 5,\ 'Producción técnica - Presentación de trabajo - Conferencia',\ 'Presentación de trabajo',\ 'Conferencia');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 6,\ 'Producción 
técnica - Presentación de trabajo - Ponencia',\ 'Presentación de trabajo',\ 'Ponencia');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 7,\ 'Estrategias pedagógicas para el fomento a la CTI',\ 'Estrategias pedagógicas',\ 'Estrategias pedagógicas');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 8,\ 'Producción bibliográfica - Artículo - Publicado en revista especializada',\ 'Publicado en revista especializada',\ 'Artículo');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 9,\ 'Producción bibliográfica - Artículo - Corto (Resumen)',\ 'Corto (Resumen)',\ 'Artículo');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 10,\ 'Estrategias pedagógicas para el fomento a la CTI',\ 'Estrategias pedagógicas',\ 'Estrategias pedagógicas');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 11,\ 'Producción bibliográfica - Artículo - Caso clínico',\ 'Caso Clínico',\ 'Artículo');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 12,\ 'Producción bibliográfica - Trabajos en eventos (Capítulos de memoria) - Resumen',\ 'Capítulo de Memoria',\ 'Resumen');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 13,\ 'Producción técnica - Presentación de trabajo - Congreso',\ 'Congreso',\ 'Congreso');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ 
`COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 14,\ 'Producción técnica - Presentación de trabajo - Simposio',\ 'Simposio',\ 'Simposio');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 15,\ 'Producción técnica - Presentación de trabajo - Seminario',\ 'Seminario',\ 'Seminario');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 16,\ 'Producción técnica - Presentación de trabajo - Otro',\ 'Otro',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 17,\ 'Producción bibliográfica - Libro - Libro resultado de investigación',\ 'Libro resultado de investigación',\ 'Libro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 18,\ 'Producción bibliográfica - Libro - Otro libro publicado',\ 'Otro libro publicado',\ 'Libro - Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 19,\ 'Producción bibliográfica - Libro - Libro pedagógico y/o de divulgación',\ 'Libro pedagógico y/o de divulgación',\ 'Libro - pedagógico');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 20,\ 'Otro capítulo de libro publicado',\ 'Otro capítulo de libro',\ 'Capítulo de libro - Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 21,\ 'Capítulo de libro',\ 'Capítulo de libro',\ 'Capítulo de libro');\n\ REPLACE INTO 
`uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 22,\ 'Producción bibliográfica - Otro artículo publicado - Periódico de noticias',\ 'Periódico de noticias',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 23,\ 'Producción bibliográfica - Otro artículo publicado - Revista de divulgación',\ 'Revista de divulgación',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 24,\ 'Producción bibliográfica - Otro artículo publicado - Cartas al editor',\ 'Cartas al editor',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 25,\ 'Producción bibliográfica - Otro artículo publicado - Reseñas de libros',\ 'Reseñas de libros',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 26,\ 'Producción bibliográfica - Otro artículo publicado - Columna de opinión',\ 'Columnas de opinión',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 27,\ 'Producción bibliográfica - Documento de trabajo (Working Paper)',\ 'Documento de trabajo (Working Paper)',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 28,\ 'Producción bibliográfica - Traducciones - Artículo',\ 'Traducciones - Artículo',\ 'Traducciones');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) 
VALUES (\ 29,\ 'Producción bibliográfica - Traducciones - Libro',\ 'Traducciones - Libro',\ 'Traducciones');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 30,\ 'Producción bibliográfica - Traducciones - Otra',\ 'Traducciones - Otra',\ 'Traducciones');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 31,\ 'Producción bibliográfica - Otra producción bibliográfica - Introducción',\ 'Introducción',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 32,\ 'Producción bibliográfica - Otra producción bibliográfica - Prólogo',\ 'Prólogo',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 33,\ 'Producción bibliográfica - Otra producción bibliográfica - Epílogo',\ 'Epílogo',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 34,\ 'Producción bibliográfica - Otra producción bibliográfica - Otra',\ 'Otra',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 35,\ 'Producción técnica - Softwares - Computacional',\ 'Software',\ 'Software');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 36,\ 'Producción técnica - Productos tecnológicos - Gen Clonado',\ 'Productos tecnológicos - Gen Clonado',\ 'Productos tecnológicos');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ 
`SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 37,\ 'Producción técnica - Productos tecnológicos - Coleccion biologica de referencia con informacion sistematizada',\ 'Productos tecnológicos - Coleccion biologica de referencia con informacion sistematizada',\ 'Productos tecnológicos');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 38,\ 'Producción técnica - Productos tecnológicos - Otro',\ 'Productos tecnológicos - Otro',\ 'Productos tecnológicos');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 39,\ 'Producción técnica - Productos tecnológicos - Base de datos de referencia para investigación',\ 'Productos tecnológicos - Base de datos de referencia para investigación',\ 'Productos tecnológicos');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 40,\ 'Producción técnica - Diseño Industrial',\ 'Diseño Industrial',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 41,\ 'Producción técnica - Esquema de circuito integrado',\ 'Esquema de circuito integrado',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 42,\ 'Producción técnica - Innovaciones generadas de producción empresarial - Organizacional',\ 'Innovaciones generadas de producción empresarial - Organizacional',\ 'Innovaciones');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 43,\ 'Producción técnica - Innovaciones generadas de producción empresarial - Empresarial',\ 
'Innovaciones generadas de producción empresarial - Empresarial',\ 'Innovaciones');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 44,\ 'Producción técnica - Variedad animal',\ 'Variedad animal',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 45,\ 'Producción técnica - Innovación de proceso o procedimiento',\ 'Innovación de proceso o procedimiento',\ 'Innovación');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 46,\ 'Producción técnica - Cartas, mapas o similares - Aerofotograma',\ 'Aerofotograma',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 47,\ 'Producción técnica - Cartas, mapas o similares - Carta',\ 'Carta',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 48,\ 'Producción técnica - Cartas, mapas o similares - Fotograma',\ 'Fotograma',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 49,\ 'Producción técnica - Cartas, mapas o similares - Mapa',\ 'Mapa',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 50,\ 'Producción técnica - Cartas, mapas o similares - Otra',\ 'Otra',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 51,\ 'Producción técnica - Variedad vegetal',\ 'Variedad 
vegetal',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 52,\ 'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Servicios de proyectos de IDI',\ 'Servicios de proyectos de IDI',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 53,\ 'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Comercialización de tecnología',\ 'Comercialización de tecnología',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 54,\ 'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Análisis de competitividad',\ 'Análisis de competitividad',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 55,\ 'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Informe técnico',\ 'Informe técnico',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 56,\ 'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Otro',\ 'Otro',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 57,\ 'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Acciones de transferencia tecnológica',\ 'Acciones de transferencia tecnológica',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 58,\ 'Producción técnica - 
Consultoría Científico Tecnológica e Informe Técnico - Desarrollo de productos',\ 'Desarrollo de productos',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 59,\ 'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Implementación de sistemas de análisis',\ 'Implementación de sistemas de análisis',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 60,\ 'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Consultoría en artes,arquitectura y diseño',\ 'Consultoría en artes,arquitectura y diseño',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 61,\ 'Producción técnica - Regulación, norma, reglamento o legislación - Ambiental o de Salud',\ 'Regulación, norma, reglamento o legislación - Ambiental o de Salud',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 62,\ 'Producción técnica - Regulación, norma, reglamento o legislación - Educativa',\ 'Regulación, norma, reglamento o legislación - Educativa',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 63,\ 'Producción técnica - Regulación, norma, reglamento o legislación - Social',\ 'Regulación, norma, reglamento o legislación - Social',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 64,\ 'Producción técnica - Regulación, norma, reglamento o legislación - Técnica',\ 'Regulación, norma, reglamento o 
legislación - Técnica',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 65,\ 'Producción técnica - Regulación, norma, reglamento o legislación - Guía de práctica clínica',\ 'Regulación, norma, reglamento o legislación - Guía de práctica clínica',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 66,\ 'Producción técnica - Regulación, norma, reglamento o legislación - Proyecto de ley',\ 'Regulación, norma, reglamento o legislación - Proyecto de ley',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 67,\ 'Producción técnica - Reglamento Técnico',\ 'Reglamento Técnico',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 68,\ 'Producción técnica - Empresa de base tecnológica - Spin-off',\ 'Empresa de base tecnológica - Spin-off',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 69,\ 'Producción técnica - Empresa de base tecnológica - Start-up',\ 'Empresa de base tecnológica - Start-up',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 70,\ 'Demás trabajos - Demás trabajos',\ 'Demás trabajos',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 71,\ 'Producción técnica - Signos',\ 'Signos',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ 
`TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 72,\ 'Producción técnica - Softwares - Multimedia',\ 'Multimedia',\ 'Software');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 73,\ 'Producción técnica - Softwares - Otra',\ 'Softwares - Otra',\ 'Software');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 74,\ 'Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Básica',\ 'Técnica - Básica',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 75,\ 'Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Ensayo',\ 'Técnica - Ensayo',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 76,\ 'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Servicios de Proyectos de I+D+I',\ 'Servicios de Proyectos de I+D+I',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 77,\ 'Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Proceso',\ 'Técnica - Proceso',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 78,\ 'Datos complementarios - Participación en comités de evaluación - Profesor titular',\ 'Participación en comités de evaluación - Profesor titular',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES 
(\ 79,\ 'Datos complementarios - Participación en comités de evaluación - Concurso docente',\ 'Participación en comités de evaluación - Concurso docente',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 80,\ 'Datos complementarios - Participación en comités de evaluación - Jefe de cátedra',\ 'articipación en comités de evaluación - Jefe de cátedra',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 81,\ 'Datos complementarios - Participación en comités de evaluación - Evaluación de cursos',\ 'Participación en comités de evaluación - Evaluación de cursos',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 82,\ 'Datos complementarios - Participación en comités de evaluación - Acreditación de programas',\ 'Participación en comités de evaluación - Acreditación de programas',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 83,\ 'Datos complementarios - Participación en comités de evaluación - Asignación de becas',\ 'Participación en comités de evaluación - Asignación de becas',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 84,\ 'Datos complementarios - Participación en comités de evaluación - Otra',\ 'Participación en comités de evaluación - Otra',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 85,\ 'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - 
Pregrado',\ 'Jurado Pregrado',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 86,\ 'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Especialización',\ 'Jurado Especialización',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 87,\ 'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Especialidad Médica',\ 'Jurado Especialidad Médica',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 88,\ 'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Maestría',\ 'Jurado Maestría',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 89,\ 'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Doctorado',\ 'Jurado Doctorado',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 90, \ 'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Otra',\ 'Jurado Otra',\ 'Comités');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 92, \ 'Producción técnica - Prototipo - Servicios',\ 'Prototipo - Servicios',\ 'Servicios');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 93, \ 'Producción técnica - Plantas piloto - Planta piloto',\ 'Plantas piloto - Planta piloto',\ 'Planta piloto');\n\ REPLACE INTO 
`uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 94, \ 'Producción técnica - Prototipo - Industrial',\ 'Prototipo - Industrial',\ 'Industrial');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 95, \ 'Producción técnica - Signos Distintivos - Marcas',\ 'Signos Distintivos - Marcas',\ 'Marcas');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 96, \ 'Producción técnica - Signos Distintivos - Nombres comerciales',\ 'Signos Distintivos - Nombres comerciales',\ 'Nombres comerciales');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 97, \ 'Apropiación - Eventos Cientificos - Otro',\ 'Eventos Cientificos - Otro',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 98, \ 'Apropiación - Eventos Cientificos - Taller',\ 'Eventos Cientificos - Taller',\ 'Taller');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 99, \ 'Apropiación - Eventos Cientificos - Congreso',\ 'Eventos Cientificos - Congreso',\ 'Congreso');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 100, \ 'Apropiación - Eventos Cientificos - Encuentro',\ 'Eventos Cientificos - Encuentro',\ 'Encuentro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 101, \ 'Apropiación - Eventos Cientificos - 
Seminario',\ 'Eventos Cientificos - Seminario',\ 'Seminario');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 102, \ 'Apropiación - Eventos Cientificos - Simposio',\ 'Eventos Cientificos - Simposio',\ 'Simposio');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 103, \ 'Apropiación - Eventos Cientificos - Informes de investigación',\ 'Eventos Cientificos - Informes de investigación',\ 'Informes de investigación');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 104, \ 'Apropiación - Impresos - Manual',\ 'Impresos - Manual',\ 'Manual');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 105, \ 'Apropiación - Impresos - Boletín',\ 'Impresos - Boletín',\ 'Boletín');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 106, \ 'Apropiación - Contenido Multimedia - Comentario',\ 'Contenido Multimedia - Comentario',\ 'Comentario');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 107, \ 'Apropiación - Contenido Multimedia - Entrevista',\ 'Contenido Multimedia - Entrevista',\ 'Entrevista');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 108, \ 'Apropiación - Contenido Virtual - Página Web',\ 'Contenido Virtual - Página Web',\ 'Página Web');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ 
`SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 109, \ 'Apropiación - Estrategias de Comunicación - Estrategias de Comunicación',\ 'Estrategias de Comunicación - Estrategias de Comunicación',\ 'Estrategias de Comunicación');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 110, \ 'Apropiación - Estrategias Pedagógicas - Estrategias Pedagógicas para el fomento a la CTI',\ 'Estrategias Pedagógicas - Estrategias Pedagógicas para el fomento a la CTI',\ 'Estrategias Pedagógicas para el fomento a la CTI');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 111, \ 'Apropiación - Participación Ciudadana - Participación Ciudadana en Proyectos de CTI',\ 'Participación Ciudadana - Participación Ciudadana en Proyectos de CTI',\ 'Participación Ciudadana en Proyectos de CTI');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 112, \ 'Apropiación - Participación Ciudadana - Espacios de Participación Ciudadana',\ 'Participación Ciudadana - Espacios de Participación Ciudadana',\ 'Espacios de Participación Ciudadana');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 113, \ 'Producción en arte, arquitectura y diseño - Obras o productos - Obras o productos',\ 'Obras o productos - Obras o productos',\ 'Obras o productos');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 114, \ 'Actividades de Formación - Actividades de Formación - Asesorías al Programa Ondas',\ 'Actividades de Formación - Asesorías al Programa Ondas',\ 'Asesorías al Programa Ondas');\n\ REPLACE INTO 
`uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 115, \ 'Actividades de Formación - Curso de Corta Duración Dictados - Perfeccionamiento',\ 'Curso de Corta Duración Dictados - Perfeccionamiento',\ 'Perfeccionamiento');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 116, \ 'Actividades de Formación - Curso de Corta Duración Dictados - Extensión Extracurricular',\ 'Curso de Corta Duración Dictados - Extensión Extracurricular',\ 'Extensión Extracurricular');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 117, \ 'Actividades de Formación - Trabajos dirigidos/turorías - Monografía de conclusión de curso',\ 'Trabajos dirigidos/turorías - Monografía de conclusión de curso',\ 'Monografía de conclusión de curso');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 118, \ 'Actividades de Formación - Curso de Corta Duración Dictados - Otro',\ 'Curso de Corta Duración Dictados - Otro',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 119, \ 'Proyectos - Investigación, desarrollo e innovación - Proyectos',\ 'Investigación, desarrollo e innovación - Proyectos',\ 'Proyectos');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 120, \ 'Apropiación social y circularción del conocimiento - Revista',\ 'Investigación, desarrollo e innovación - Revista',\ 'Revista');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ 
`SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 121, \ 'Apropiación social y circularción del conocimiento - Cartilla',\ 'Contenidos Impresos - Cartilla',\ 'Cartilla');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 122, \ 'Actividades de Formación - Cursos de Corta Duración - Especialización',\ 'Cursos de Corta Duración - Especialización',\ 'Especialización');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 123, \ 'Apropiación - Contenidos Multimedia - Otro',\ 'Contenidos Multimedia - Otro',\ 'Otro');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 124, \ 'Apropiación - Contenidos Virtuales - Blog',\ 'Contenidos Virtuales - Blog',\ 'Blog');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 125, \ 'Apropiación - Contenidos Virtuales - Aplicativo',\ 'Contenidos Virtuales - Aplicativo',\ 'Aplicativo');\n\ REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \ `COD_TIPO_PRODUCTO`,\ `TIPO_PRODUCTO_COL`,\ `SUB_TIPO_PRODUCTO_COL`,\ `TIPO_UAPA`) VALUES (\ 91, \ 'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Curso de perfeccionamiento/especialización',\ 'Jurado Especial',\ 'Comités');\n"]
29.831986
122
0.744953
6,414
51,669
5.743374
0.082476
0.166459
0.103426
0.083392
0.890466
0.885499
0.873772
0.864461
0.850915
0.814539
0
0.010224
0.116008
51,669
1,731
123
29.84922
0.7963
0.014361
0
0.503835
0
0.00118
0.262223
0.00065
0
0
0
0
0
1
0.00059
false
0
0
0
0.00059
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a550fbf0806ccc2e40e0a75340a7b677dae1c755
78
py
Python
tests/printing/test_registry_rendering.py
anna-naden/qalgebra
e7641ef77a2433caf2f587df27235800b894b631
[ "MIT" ]
2
2020-08-17T12:18:19.000Z
2020-08-25T11:17:27.000Z
tests/printing/test_registry_rendering.py
anna-naden/qalgebra
e7641ef77a2433caf2f587df27235800b894b631
[ "MIT" ]
1
2022-01-13T10:29:18.000Z
2022-01-13T10:29:18.000Z
tests/printing/test_registry_rendering.py
anna-naden/qalgebra
e7641ef77a2433caf2f587df27235800b894b631
[ "MIT" ]
null
null
null
import os import pytest from qalgebra.utils.testing import datadir # TODO
8.666667
42
0.782051
11
78
5.545455
0.818182
0
0
0
0
0
0
0
0
0
0
0
0.179487
78
8
43
9.75
0.953125
0.051282
0
0
0
0
0
0
0
0
0
0.125
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
1
0
1
0
0
6
a552f91d3303c3c4ba023cfcd62f634b5b719020
44
py
Python
plugin/lighthouse/reader/__init__.py
x9090/lighthouse
b7378ee5948ba81900ef80538870f2e2b47610f4
[ "MIT" ]
1,741
2017-02-21T14:09:27.000Z
2022-03-30T19:49:25.000Z
plugin/lighthouse/reader/__init__.py
x9090/lighthouse
b7378ee5948ba81900ef80538870f2e2b47610f4
[ "MIT" ]
114
2017-03-12T21:46:16.000Z
2022-03-16T22:10:49.000Z
plugin/lighthouse/reader/__init__.py
x9090/lighthouse
b7378ee5948ba81900ef80538870f2e2b47610f4
[ "MIT" ]
264
2017-02-21T14:46:16.000Z
2022-03-14T12:21:15.000Z
from .coverage_reader import CoverageReader
22
43
0.886364
5
44
7.6
1
0
0
0
0
0
0
0
0
0
0
0
0.090909
44
1
44
44
0.95
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a5b0d52e0078ea20025f0753051928c6476236c3
43
py
Python
h1st/core/__init__.py
Shiti/h1st
0805452bda2453924663203b11f448e31525d596
[ "Apache-2.0" ]
null
null
null
h1st/core/__init__.py
Shiti/h1st
0805452bda2453924663203b11f448e31525d596
[ "Apache-2.0" ]
null
null
null
h1st/core/__init__.py
Shiti/h1st
0805452bda2453924663203b11f448e31525d596
[ "Apache-2.0" ]
null
null
null
from .dataclass import NodeInfo, GraphInfo
21.5
42
0.837209
5
43
7.2
1
0
0
0
0
0
0
0
0
0
0
0
0.116279
43
1
43
43
0.947368
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a5b3fe8a62484a08f85e5b9ff9a70af0ed0c8faf
225
py
Python
utils/start_server.py
FGAUnB-REQ-GM/2021.2-PousadaAnimal
b7371aebccad0da23073de0db642a6ce824f919e
[ "MIT" ]
null
null
null
utils/start_server.py
FGAUnB-REQ-GM/2021.2-PousadaAnimal
b7371aebccad0da23073de0db642a6ce824f919e
[ "MIT" ]
95
2022-02-04T19:40:09.000Z
2022-03-31T20:24:11.000Z
utils/start_server.py
FGAUnB-REQ-GM/2021.2-PousadaAnimal
b7371aebccad0da23073de0db642a6ce824f919e
[ "MIT" ]
4
2022-01-26T23:51:48.000Z
2022-01-27T18:28:16.000Z
from os import system # Database system('python3 manage.py makemigrations users pets hosting services message payment host') system('python3 manage.py migrate') # Server system('python3 manage.py runserver localhost:8000')
25
91
0.8
30
225
6
0.7
0.216667
0.316667
0.35
0
0
0
0
0
0
0
0.035354
0.12
225
9
92
25
0.873737
0.066667
0
0
0
0
0.711538
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
3c3b1f75cf099592f20b9f3fb8aa02e9dab8062a
148
py
Python
Chapter 2/wall_time.py
indrag49/Computational-Stat-Mech
0877f54a0245fce815f03478f4fb219fd6314951
[ "MIT" ]
19
2018-06-29T12:22:47.000Z
2022-03-10T03:18:18.000Z
Chapter 2/wall_time.py
indrag49/Computational-Stat-Mech
0877f54a0245fce815f03478f4fb219fd6314951
[ "MIT" ]
null
null
null
Chapter 2/wall_time.py
indrag49/Computational-Stat-Mech
0877f54a0245fce815f03478f4fb219fd6314951
[ "MIT" ]
7
2018-11-30T01:56:36.000Z
2021-12-23T15:29:56.000Z
from sympy import oo def wall_time(pos, vel, radius): return (1.0-radius-pos)/vel if vel>0.0 else (pos-radius)/abs(vel) if vel<0.0 else float(oo)
49.333333
125
0.709459
32
148
3.25
0.53125
0.115385
0.153846
0.173077
0.269231
0.269231
0
0
0
0
0
0.046875
0.135135
148
2
126
74
0.765625
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0.5
0.5
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
3c5deb62501fc581506d694af7891b5f422871dc
22,655
py
Python
app/utils.py
mirsazzathossain/SPMS-Project
eb2b9144b6ddb8d18c146a4c4d6f79b9f7a7eeb5
[ "MIT" ]
null
null
null
app/utils.py
mirsazzathossain/SPMS-Project
eb2b9144b6ddb8d18c146a4c4d6f79b9f7a7eeb5
[ "MIT" ]
null
null
null
app/utils.py
mirsazzathossain/SPMS-Project
eb2b9144b6ddb8d18c146a4c4d6f79b9f7a7eeb5
[ "MIT" ]
null
null
null
from django.db import connection import numpy as np def getstudentcoursewisePLO(studentID, courseID): with connection.cursor() as cursor: cursor.execute(''' SELECT p.ploNum as plonum,100*(sum(e.obtainedMarks)/sum(a.totalMarks)) as plopercent FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and r.student_id = '{}' and co.course_id = '{}' GROUP BY p.ploID '''.format(studentID, courseID)) row = cursor.fetchall() return row def getcoursewiseavgPLO(courseID): with connection.cursor() as cursor: cursor.execute(''' SELECT p.ploNum as plonum, avg(100*e.obtainedMarks/a.totalMarks) FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and co.course_id = '{}' GROUP BY p.ploID '''.format(courseID)) row = cursor.fetchall() return row def getcompletedcourses(studentID): with connection.cursor() as cursor: cursor.execute( ''' SELECT distinct s.course_id FROM app_registration_t r, app_evaluation_t e, app_section_t s WHERE r.registrationID = e.registration_id and r.section_id = s.sectionID and r.student_id = '{}' '''.format(studentID)) row = cursor.fetchall() return row def getcorrespondingstudentid(userID): with connection.cursor() as cursor: cursor.execute( ''' SELECT studentID FROM app_student_t s WHERE s.user_ptr_id = '{}' '''.format(userID)) row = cursor.fetchall() return row def getstudentprogramwisePLO(studentID): with connection.cursor() as cursor: cursor.execute(''' SELECT p.ploNum as plonum,100*(sum(e.obtainedMarks)/sum(a.totalMarks)) as plopercent FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, app_student_t s, app_program_t pr WHERE r.registrationID = e.registration_id and e.assessment_id = 
a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and r.student_id = '{}' and s.studentID = r.student_id and s.program_id = pr.programID GROUP BY p.ploID '''.format(studentID)) row = cursor.fetchall() return row def getprogramwiseavgPLO(programID): with connection.cursor() as cursor: cursor.execute(''' SELECT p.ploNum as plonum, avg(100*e.obtainedMarks/a.totalMarks) FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and p.program_id = '{}' GROUP BY p.ploID '''.format(programID)) row = cursor.fetchall() return row def getstudentprogramid(studentID): with connection.cursor() as cursor: cursor.execute(''' SELECT s.program_id FROM app_student_t s WHERE s.studentID = '{}' '''.format(studentID)) row = cursor.fetchall() return row def getstudentallcoursePLO(studentID, category): with connection.cursor() as cursor: cursor.execute(''' SELECT p.ploNum as ploNum,co.course_id,sum(e.obtainedMarks),sum(a.totalMarks), derived.Total FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, ( SELECT p.ploNum as ploNum,sum(a.totalMarks) as Total, r.student_id as StudentID FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and r.student_id = '{}' GROUP BY r.student_id,p.ploID) derived WHERE r.student_id = derived.StudentID and e.registration_id = r.registrationID and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and p.ploNum = derived.ploNum GROUP BY p.ploID,co.course_id '''.format(studentID)) row = cursor.fetchall() table = [] courses = [] for entry in row: if entry[1] not in courses: courses.append(entry[1]) courses.sort() plo = ["PLO1", "PLO2", "PLO3", "PLO4", "PLO5", "PLO6", "PLO7", 
"PLO8", "PLO9", "PLO10", "PLO11", "PLO12"] for i in courses: temptable = [] if category == 'report': temptable = [i] for j in plo: found = False for k in row: if j == k[0] and i == k[1]: if category == 'report': temptable.append(np.round(100 * k[2] / k[3], 2)) elif category == 'chart': temptable.append(np.round(100 * k[2] / k[4], 2)) found = True if not found: if category == 'report': temptable.append('N/A') elif category == 'chart': temptable.append(0) table.append(temptable) return plo, courses, table def getfacultycoursewisePLO(courseID, semesters): sem = ''; for semester in semesters: sem += '"' sem += semester sem += '",' sem = sem[:-1] with connection.cursor() as cursor: cursor.execute(''' SELECT f.first_name, f.last_name, f.plonum, COUNT(*) as achieved_cnt FROM ( SELECT u.first_name, u.last_name, p.ploNum as plonum, 100*e.obtainedMarks/a.totalMarks as percentage FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, app_section_t s, accounts_user u, app_employee_t emp WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and a.section_id = s.sectionID and s.faculty_id IN ( SELECT DISTINCT s.faculty_id FROM app_section_t s WHERE s.course_id = '{}' ) and s.semester IN ({}) and s.course_id ='{}' and s.faculty_id = emp.employeeID and emp.user_ptr_id = u.id )f WHERE f.percentage >= 40 GROUP BY f.first_name, f.plonum; '''.format(courseID, sem, courseID)) row1 = cursor.fetchall() cursor.execute(''' SELECT COUNT(*) FROM ( SELECT u.first_name, u.last_name, p.ploNum as plonum, 100*e.obtainedMarks/a.totalMarks as percentage FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, app_section_t s, accounts_user u, app_employee_t emp WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and a.section_id = s.sectionID and s.faculty_id IN ( SELECT DISTINCT 
s.faculty_id FROM app_section_t s WHERE s.course_id = '{}' ) and s.semester IN ({}) and s.course_id ='{}' and s.faculty_id = emp.employeeID and emp.user_ptr_id = u.id )f GROUP BY f.first_name, f.plonum; '''.format(courseID, sem, courseID)) row2 = cursor.fetchall() faculty = [] plonum = [] plos1 = [] plos2 = [] for record in row1: faculty.append(record[0]+' '+record[1]) plonum.append(record[2]) plos1.append(record[3]) for record in row2: plos2.append(record[0]) plos = 100*(np.array(plos1)/np.array(plos2)) plos = plos.tolist() faculty = list(set(faculty)) plonum = list(set(plonum)) plonum.sort() plonum.sort(key=len, reverse=False) plos = np.array(plos) plos = np.split(plos, len(plos)/len(plonum)) new_plo=[] for plo in plos: new_plo.append(plo.tolist()) return faculty, plonum, new_plo def getsemestercoursewisePLO(courseID, semesters): sem = ''; for semester in semesters: sem += '"' sem += semester sem += '",' sem = sem[:-1] with connection.cursor() as cursor: cursor.execute(''' SELECT f.semester, f.plonum, COUNT(*) as achieved_cnt FROM ( SELECT s.semester, p.ploNum as plonum, s.course_id, 100*e.obtainedMarks/a.totalMarks as percentage FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, app_section_t s WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and a.section_id = s.sectionID and s.semester IN ({}) and co.course_id ='{}' and s.course_id = co.course_id )f WHERE f.percentage >= 40 GROUP BY f.semester, f.plonum; '''.format(sem, courseID)) row1 = cursor.fetchall() cursor.execute(''' SELECT COUNT(*) as all_cnt FROM ( SELECT s.semester, p.ploNum as plonum, s.course_id, 100*e.obtainedMarks/a.totalMarks as percentage FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, app_section_t s WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and 
a.section_id = s.sectionID and s.semester IN ({}) and co.course_id ='{}' and s.course_id = co.course_id )f GROUP BY f.semester, f.plonum; '''.format(sem, courseID)) row2 = cursor.fetchall() semester = [] plonum = [] acheived = [] all_cnt = [] for record in row1: semester.append(record[0]) plonum.append(record[1]) acheived.append(record[2]) for record in row2: all_cnt.append(record[0]) acheived_per = 100*(np.array(acheived)/np.array(all_cnt)) semester = list(set(semester)) plonum = list(set(plonum)) failed_per = 100 - acheived_per acheived_per = np.split(acheived_per, len(acheived_per)/len(semester)) failed_per = np.split(failed_per, len(failed_per)/len(semester)) acheived=[] for plo in acheived_per: acheived.append(plo.tolist()) failed=[] for plo in failed_per: failed.append(plo.tolist()) return semester, plonum, acheived, failed def getplowisecoursecomparism(plos, semesters): sem = ''; for semester in semesters: sem += '"' sem += semester sem += '",' sem = sem[:-1] ploo = ''; for plo in plos: ploo += '"' ploo += plo ploo += '",' ploo = ploo[:-1] with connection.cursor() as cursor: cursor.execute(''' SELECT f.course_id, f.ploNum, COUNT(*) FROM ( SELECT s.course_id, p.ploNum, 100*e.obtainedMarks/a.totalMarks as percentage FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, app_section_t s WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and p.ploNum in ({}) and a.section_id = s.sectionID and s.semester IN ({}) )f WHERE f.percentage >= 40 GROUP BY f.ploNum, f.course_id; '''.format(ploo, sem)) row1 = cursor.fetchall() with connection.cursor() as cursor: cursor.execute(''' SELECT COUNT(*) FROM ( SELECT s.course_id, p.ploNum, 100*e.obtainedMarks/a.totalMarks as percentage FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, app_section_t s WHERE r.registrationID = e.registration_id and e.assessment_id = 
a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and p.ploNum in ({}) and a.section_id = s.sectionID and s.semester IN ({}) )f GROUP BY f.ploNum, f.course_id; '''.format(ploo, sem)) row2 = cursor.fetchall() courses = [] plonum = [] acheived = [] all_cnt = [] for record in row1: courses.append(record[0]) plonum.append(record[1]) acheived.append(record[2]) for record in row2: all_cnt.append(record[0]) acheived_per = 100*(np.array(acheived)/np.array(all_cnt)) courses = list(set(courses)) plonum = list(set(plonum)) acheived_per = np.split(acheived_per, len(acheived_per)/len(plonum)) acheived=[] for plo in acheived_per: acheived.append(plo.tolist()) return courses, plonum, acheived def getprogramsemesterwiseplocount(program, semesters): sem = ''; for semester in semesters: sem += '"' sem += semester sem += '",' sem = sem[:-1] with connection.cursor() as cursor: cursor.execute(''' SELECT f.plonum, COUNT(*) FROM ( SELECT p.ploNum as plonum, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, app_section_t s, app_program_t prog WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and p.program_id = prog.programID and prog.programName = '{}' and a.section_id = s.sectionID and s.semester IN ({}) )f WHERE f.percentage>=40 GROUP BY f.plonum; '''.format(program, sem)) row1 = cursor.fetchall() with connection.cursor() as cursor: cursor.execute(''' SELECT COUNT(*) FROM ( SELECT p.ploNum as plonum, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, app_section_t s, app_program_t prog WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and p.program_id = prog.programID and prog.programName = '{}' and a.section_id = s.sectionID and 
s.semester IN ({}) )f GROUP BY f.plonum; '''.format(program, sem)) row2 = cursor.fetchall() plonum = [] acheived = [] attempted = [] for record in row1: plonum.append(record[0]) acheived.append(record[1]) for record in row2: attempted.append(record[0]) plonum = list(set(plonum)) acheived = np.array(acheived) attempted = np.array(attempted) new_acheived=[] for plo in acheived: new_acheived.append(plo.tolist()) new_attempted=[] for plo in attempted: new_attempted.append(plo.tolist()) plonum.sort() plonum.sort(key=len, reverse=False) return plonum, new_acheived, new_attempted def getprogramwiseploandcourses(program, semesters): sem = ''; for semester in semesters: sem += '"' sem += semester sem += '",' sem = sem[:-1] with connection.cursor() as cursor: cursor.execute(''' SELECT f.ploNum, f.course_id, COUNT(*) FROM ( SELECT p.ploNum as plonum, s.course_id, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage FROM app_registration_t r, app_assessment_t a, app_evaluation_t e, app_co_t co, app_plo_t p, app_section_t s, app_program_t prog WHERE r.registrationID = e.registration_id and e.assessment_id = a.assessmentID and a.co_id=co.coID and co.plo_id = p.ploID and p.program_id = prog.programID and prog.programName = '{}' and a.section_id = s.sectionID and s.semester IN ({}) )f WHERE f.percentage>=40 GROUP BY f.ploNum, f.course_id '''.format(program, sem)) row = cursor.fetchall() plonum = [] courses = [] counts = [] for record in row: plonum.append(record[0]) courses.append(record[1]) plonum = list(set(plonum)) plonum.sort() plonum.sort(key=len, reverse=False) courses = list(set(courses)) courses.sort() table = np.zeros((len(courses), len(plonum))) for record in row: table[courses.index(record[1])][plonum.index(record[0])] += record[2] table = table.tolist() return plonum, courses, table
35.343214
122
0.455352
2,317
22,655
4.294778
0.067328
0.020098
0.032459
0.032158
0.773792
0.743543
0.72445
0.705758
0.666566
0.62848
0
0.011656
0.462238
22,655
641
123
35.343214
0.805138
0
0
0.761194
0
0.020522
0.636851
0.025595
0
0
0
0
0
1
0.024254
false
0
0.003731
0
0.052239
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
3c66bd87520d7163328c7042283d619a6348309f
45
py
Python
pyaugmecon/__init__.py
vishalbelsare/pyaugmecon
b9b6310b66007d1be7035f50a7e2691e7669f74e
[ "MIT" ]
5
2021-05-29T20:18:06.000Z
2022-01-20T08:56:26.000Z
pyaugmecon/__init__.py
vishalbelsare/pyaugmecon
b9b6310b66007d1be7035f50a7e2691e7669f74e
[ "MIT" ]
null
null
null
pyaugmecon/__init__.py
vishalbelsare/pyaugmecon
b9b6310b66007d1be7035f50a7e2691e7669f74e
[ "MIT" ]
3
2021-08-20T19:27:28.000Z
2022-01-21T13:42:49.000Z
from pyaugmecon.pyaugmecon import PyAugmecon
22.5
44
0.888889
5
45
8
0.6
0
0
0
0
0
0
0
0
0
0
0
0.088889
45
1
45
45
0.97561
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
593e1e17a425e56881cf41a7f836ee82374b8d59
171
py
Python
OpenGLCffi/GLX/EXT/SGIX/dmbuffer.py
cydenix/OpenGLCffi
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
[ "MIT" ]
null
null
null
OpenGLCffi/GLX/EXT/SGIX/dmbuffer.py
cydenix/OpenGLCffi
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
[ "MIT" ]
null
null
null
OpenGLCffi/GLX/EXT/SGIX/dmbuffer.py
cydenix/OpenGLCffi
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
[ "MIT" ]
null
null
null
from OpenGLCffi.GLX import params @params(api='glx', prms=['dpy', 'pbuffer', 'params', 'dmbuffer']) def glXAssociateDMPbufferSGIX(dpy, pbuffer, params, dmbuffer): pass
24.428571
65
0.730994
20
171
6.25
0.65
0.16
0.256
0.384
0
0
0
0
0
0
0
0
0.105263
171
6
66
28.5
0.816993
0
0
0
0
0
0.159763
0
0
0
0
0
0
1
0.25
false
0.25
0.25
0
0.5
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
6
3ca729505e43392eecb38753ae74fd29ff951cd7
1,202
py
Python
code/functions/segment/__init__.py
a9w/Fat2_polarizes_WAVE
be39ba21245a9b532a70954a38139976a2355a7d
[ "MIT" ]
null
null
null
code/functions/segment/__init__.py
a9w/Fat2_polarizes_WAVE
be39ba21245a9b532a70954a38139976a2355a7d
[ "MIT" ]
null
null
null
code/functions/segment/__init__.py
a9w/Fat2_polarizes_WAVE
be39ba21245a9b532a70954a38139976a2355a7d
[ "MIT" ]
null
null
null
"""Functions for segmenting images.""" from .interface import ( interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors, ) from .timelapse import ( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from .tissue import ( epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, ) __all__ = [ "interface_endpoints_mask", "interface_endpoints_coords", "interface_shape_edge_method", "trim_interface", "refine_junction", "edge_between_neighbors", "segment_epithelium_timelapse", "largest_object_mask_timelapse", "segment_hemijunctions_timelapse", "epithelium_watershed", "largest_object_mask", "select_border_adjacent", "select_in_field", "select_mask_adjacent", "segment_hemijunctions", "cell_edges_mask", "cell_interiors_mask", "cell_vertices_mask", "neighbor_array_nr" ]
24.04
38
0.742928
124
1,202
6.58871
0.314516
0.088127
0.083231
0.075887
0.895961
0.895961
0.895961
0.895961
0.895961
0.895961
0
0
0.178869
1,202
49
39
24.530612
0.827761
0.026622
0
0
0
0
0.345361
0.197595
0
0
0
0
0
1
0
false
0
0.065217
0
0.065217
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
3cc05d6b6890f5cd2771c0bf4e20222a059024a6
31
py
Python
snippets/python/automation/beep.py
c6401/Snippets
a88d97005658eeda99f1a2766e3d069a64e142cb
[ "MIT" ]
null
null
null
snippets/python/automation/beep.py
c6401/Snippets
a88d97005658eeda99f1a2766e3d069a64e142cb
[ "MIT" ]
null
null
null
snippets/python/automation/beep.py
c6401/Snippets
a88d97005658eeda99f1a2766e3d069a64e142cb
[ "MIT" ]
null
null
null
def beep(): print('\007')
7.75
17
0.483871
4
31
3.75
1
0
0
0
0
0
0
0
0
0
0
0.130435
0.258065
31
3
18
10.333333
0.521739
0
0
0
0
0
0.133333
0
0
0
0
0
0
1
0.5
true
0
0
0
0.5
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
6
3ce5fd9779d7d65bfe16fb75039683fefaa02664
36
py
Python
src/cryptos/__init__.py
villoro/airflow_tasks
81bd892744a9bbbf6e01903649b6c3786a955a5a
[ "MIT" ]
null
null
null
src/cryptos/__init__.py
villoro/airflow_tasks
81bd892744a9bbbf6e01903649b6c3786a955a5a
[ "MIT" ]
4
2020-10-09T15:59:09.000Z
2020-11-18T08:34:44.000Z
src/cryptos/__init__.py
villoro/airflow_tasks
81bd892744a9bbbf6e01903649b6c3786a955a5a
[ "MIT" ]
null
null
null
from .process import update_cryptos
18
35
0.861111
5
36
6
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
36
1
36
36
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a70d88641d80e3344d467bea499761a70a1897c3
21,155
py
Python
tests/test_mc_cnn.py
CNES/Pandora_MCCNN
54d4423f88d31831065ff28ccb5affb724239988
[ "Apache-2.0" ]
3
2021-07-20T09:41:56.000Z
2021-12-13T08:29:43.000Z
tests/test_mc_cnn.py
qfardet/Pandora_MCCNN
0bd26d78f2f4dc1d8571f2cdf47e327dc1628c9e
[ "Apache-2.0" ]
null
null
null
tests/test_mc_cnn.py
qfardet/Pandora_MCCNN
0bd26d78f2f4dc1d8571f2cdf47e327dc1628c9e
[ "Apache-2.0" ]
2
2021-07-09T15:08:05.000Z
2022-01-20T16:27:03.000Z
#!/usr/bin/env python # coding: utf8 # # Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES). # # This file is part of PANDORA_MCCNN # # https://github.com/CNES/Pandora_MCCNN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains functions to test the cost volume create by mc_cnn """ import unittest import numpy as np import torch import torch.nn as nn from mc_cnn.run import computes_cost_volume_mc_cnn_fast from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer from mc_cnn.dataset_generator.middlebury_generator import MiddleburyGenerator from mc_cnn.dataset_generator.datas_fusion_contest_generator import DataFusionContestGenerator # pylint: disable=no-self-use class TestMCCNN(unittest.TestCase): """ TestMCCNN class allows to test the cost volume create by mc_cnn """ def setUp(self): """ Method called to prepare the test fixture """ self.ref_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) + 1 self.ref_img_1 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) - 1 def test_computes_cost_volume_mc_cnn_fast(self): """ " Test the computes_cost_volume_mc_cnn_fast function """ # create reference and secondary features ref_feature = torch.randn((64, 4, 4), dtype=torch.float64) sec_features = torch.randn((64, 4, 4), dtype=torch.float64) cos = nn.CosineSimilarity(dim=0, eps=1e-6) # Create the ground truth cost volume 
(row, col, disp) cv_gt = np.full((4, 4, 5), np.nan) # disparity -2 cv_gt[:, 2:, 0] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:, 1:, 1] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy() # disparity 0 cv_gt[:, :, 2] = cos(ref_feature[:, :, :], sec_features[:, :, :]).cpu().detach().numpy() # disparity 1 cv_gt[:, :3, 3] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 4] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy() # The minus sign converts the similarity score to a matching cost cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -2, 2) # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_fast_negative_disp(self): """ " Test the computes_cost_volume_mc_cnn_fast function with negative disparities """ # create reference and secondary features ref_feature = torch.randn((64, 4, 4), dtype=torch.float64) sec_features = torch.randn((64, 4, 4), dtype=torch.float64) cos = nn.CosineSimilarity(dim=0, eps=1e-6) # Create the ground truth cost volume (row, col, disp) cv_gt = np.full((4, 4, 4), np.nan) # disparity -4 # all nan # disparity -3 cv_gt[:, 3:, 1] = cos(ref_feature[:, :, 3:], sec_features[:, :, 0:1]).cpu().detach().numpy() # disparity -2 cv_gt[:, 2:, 2] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:, 1:, 3] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy() # The minus sign converts the similarity score to a matching cost cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -4, -1) # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def 
test_computes_cost_volume_mc_cnn_fast_positive_disp(self): """ " Test the computes_cost_volume_mc_cnn_fast function with positive disparities """ # create reference and secondary features ref_feature = torch.randn((64, 4, 4), dtype=torch.float64) sec_features = torch.randn((64, 4, 4), dtype=torch.float64) cos = nn.CosineSimilarity(dim=0, eps=1e-6) # Create the ground truth cost volume (row, col, disp) cv_gt = np.full((4, 4, 4), np.nan) # disparity 1 cv_gt[:, :3, 0] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 1] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy() # disparity 3 cv_gt[:, :1, 2] = cos(ref_feature[:, :, :1], sec_features[:, :, 3:]).cpu().detach().numpy() # disparity 4 # all nan # The minus sign converts the similarity score to a matching cost cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, 1, 4) # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def sad_cost(self, ref_features, sec_features): """ Useful to test the computes_cost_volume_mc_cnn_accurate function """ return torch.sum(abs(ref_features[0, :, :, :] - sec_features[0, :, :, :]), dim=0) def test_computes_cost_volume_mc_cnn_accurate(self): """ " Test the computes_cost_volume_mc_cnn_accurate function """ # create reference and secondary features ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64) sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64) # Create the ground truth cost volume (row, col, disp) cv_gt = np.full((4, 4, 5), np.nan) # disparity -2 cv_gt[:, 2:, 0] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :, 0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:, 1:, 1] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy() # disparity 0 cv_gt[:, :, 2] = self.sad_cost(ref_feature[:, :, :, :], 
sec_features[:, :, :, :]).cpu().detach().numpy() # disparity 1 cv_gt[:, :3, 3] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 4] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy() # The minus sign converts the similarity score to a matching cost cv_gt *= -1 acc = AccMcCnnInfer() # Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -2, 2, self.sad_cost) # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self): """ " Test the computes_cost_volume_mc_cnn_accurate function with negative disparities """ # create reference and secondary features ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64) sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64) # Create the ground truth cost volume (row, col, disp) cv_gt = np.full((4, 4, 4), np.nan) # disparity -4 # all nan # disparity -3 cv_gt[:, 3:, 1] = self.sad_cost(ref_feature[:, :, :, 3:], sec_features[:, :, :, 0:1]).cpu().detach().numpy() # disparity -2 cv_gt[:, 2:, 2] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :, 0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:, 1:, 3] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy() # The minus sign converts the similarity score to a matching cost cv_gt *= -1 acc = AccMcCnnInfer() # Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -4, -1, self.sad_cost) # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def 
test_computes_cost_volume_mc_cnn_accurate_positive_disp(self): """ " Test the computes_cost_volume_mc_cnn_accurate function with positive disparities """ # create reference and secondary features ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64) sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64) # Create the ground truth cost volume (row, col, disp) cv_gt = np.full((4, 4, 4), np.nan) # disparity 1 cv_gt[:, :3, 0] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 1] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy() # disparity 3 cv_gt[:, :1, 2] = self.sad_cost(ref_feature[:, :, :, :1], sec_features[:, :, :, 3:]).cpu().detach().numpy() # disparity 4 # all nan # The minus sign converts the similarity score to a matching cost cv_gt *= -1 acc = AccMcCnnInfer() # Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, 1, 4, self.sad_cost) # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) # pylint: disable=invalid-name # -> because changing the name here loses the reference to the actual name of the checked function def test_MiddleburyGenerator(self): """ test the function MiddleburyGenerator """ # Script use to create images_middlebury and samples_middlebury : # pylint: disable=pointless-string-statement """ # shape 1, 2, 13, 13 : 1 exposures, 2 = left and right images image_pairs_0 = np.zeros((1, 2, 13, 13)) # left image_pairs_0[0, 0, :, :] = np.tile(np.arange(13), (13, 1)) # right image_pairs_0[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) + 1 image_pairs_1 = np.zeros((1, 2, 13, 13)) image_pairs_1[0, 0, :, :] = np.tile(np.arange(13), (13, 1)) image_pairs_1[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) - 1 img_file = 
h5py.File('images_middlebury.hdf5', 'w') img_0 = [image_pairs_0] grp = img_file.create_group(str(0)) # 1 illumination for light in range(len(img_0)): dset = grp.create_dataset(str(light), data=img_0[light]) img_1 = [image_pairs_1] grp = img_file.create_group(str(1)) for light in range(len(img_1)): dset = grp.create_dataset(str(light), data=img_1[light]) sampl_file = h5py.File('sample_middlebury.hdf5', 'w') # disparity of image_pairs_0 x0 = np.array([[0., 5., 6., 1.] [0., 7., 7., 1.]]) # disparity of image_pairs_1 x1 = np.array([[ 1., 7., 5., -1.] [ 0., 0., 0., 0.]]) sampl_file.create_dataset(str(0), data=x0) sampl_file.create_dataset(str(1), data=x1) """ # Positive disparity cfg = { "data_augmentation": False, "dataset_neg_low": 1, "dataset_neg_high": 1, "dataset_pos": 0, "augmentation_param": { "vertical_disp": 0, "scale": 0.8, "hscale": 0.8, "hshear": 0.1, "trans": 0, "rotate": 28, "brightness": 1.3, "contrast": 1.1, "d_hscale": 0.9, "d_hshear": 0.3, "d_vtrans": 1, "d_rotate": 3, "d_brightness": 0.7, "d_contrast": 1.1, }, } training_loader = MiddleburyGenerator("tests/sample_middlebury.hdf5", "tests/images_middlebury.hdf5", cfg) # Patch of shape 3, 11, 11 # With the firt dimension = left patch, right positive patch, right negative patch patch = training_loader.__getitem__(0) x_ref_patch = 6 y_ref_patch = 5 patch_size = 5 gt_ref_patch = self.ref_img_0[ y_ref_patch - patch_size : y_ref_patch + patch_size + 1, x_ref_patch - patch_size : x_ref_patch + patch_size + 1, ] # disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) disp = 1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch = 5 gt_sec_pos_patch = self.sec_img_0[ y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1, x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1, ] # dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) dataset_neg = 1 x_sec_neg_patch = x_ref_patch - disp + dataset_neg y_sec_neg_patch = 5 
gt_sec_neg_patch = self.sec_img_0[ y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1, x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1, ] gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0) # Check if the calculated patch is equal to the ground truth (same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path) # negative disparity patch = training_loader.__getitem__(2) x_ref_patch = 5 y_ref_patch = 7 patch_size = 5 gt_ref_patch = self.ref_img_0[ y_ref_patch - patch_size : y_ref_patch + patch_size + 1, x_ref_patch - patch_size : x_ref_patch + patch_size + 1, ] # disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) disp = -1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch = 5 gt_sec_pos_patch = self.sec_img_0[ y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1, x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1, ] # dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) dataset_neg = 1 x_sec_neg_patch = x_ref_patch - disp + dataset_neg y_sec_neg_patch = 5 gt_sec_neg_patch = self.sec_img_0[ y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1, x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1, ] gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0) # Check if the calculated patch is equal to the ground truth (same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path) # pylint: disable=invalid-name # -> because changing the name here loses the reference to the actual name of the checked function def test_DataFusionContestGenerator(self): """ test the function DataFusionContestGenerator """ # pylint: disable=pointless-string-statement """ # Script use to create images_middlebury and samples_middlebury : # shape 2, 13, 13 : 2 = left and right images, row, col image_pairs_0 = np.zeros((2, 13, 13)) # left image_pairs_0[0, :, 
:] = np.tile(np.arange(13), (13, 1)) # right image_pairs_0[1, :, :] = np.tile(np.arange(13), (13, 1)) + 1 image_pairs_1 = np.zeros((2, 13, 13)) image_pairs_1[0, :, :] = np.tile(np.arange(13), (13, 1)) image_pairs_1[1, :, :] = np.tile(np.arange(13), (13, 1)) - 1 img_file = h5py.File('images_dfc.hdf5', 'w') img_file.create_dataset(str(0), data=image_pairs_0) img_file.create_dataset(str(1), data=image_pairs_1) sampl_file = h5py.File('sample_dfc.hdf5', 'w') # disparity of image_pairs_0 x0 = np.array([[0., 5., 6., 1.], [0., 7., 7., 1.]]) # disparity of image_pairs_1 x1 = np.array([[ 1., 7., 5., -1.], [ 0., 0., 0., 0.]]) sampl_file.create_dataset(str(0), data=x0) sampl_file.create_dataset(str(1), data=x1) """ # Positive disparity cfg = { "data_augmentation": False, "dataset_neg_low": 1, "dataset_neg_high": 1, "dataset_pos": 0, "vertical_disp": 0, "augmentation_param": { "scale": 0.8, "hscale": 0.8, "hshear": 0.1, "trans": 0, "rotate": 28, "brightness": 1.3, "contrast": 1.1, "d_hscale": 0.9, "d_hshear": 0.3, "d_vtrans": 1, "d_rotate": 3, "d_brightness": 0.7, "d_contrast": 1.1, }, } training_loader = DataFusionContestGenerator("tests/sample_dfc.hdf5", "tests/images_dfc.hdf5", cfg) # Patch of shape 3, 11, 11 # With the firt dimension = left patch, right positive patch, right negative patch patch = training_loader.__getitem__(0) x_ref_patch = 6 y_ref_patch = 5 patch_size = 5 gt_ref_patch = self.ref_img_0[ y_ref_patch - patch_size : y_ref_patch + patch_size + 1, x_ref_patch - patch_size : x_ref_patch + patch_size + 1, ] # disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) disp = 1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch = 5 gt_sec_pos_patch = self.sec_img_0[ y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1, x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1, ] # dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) dataset_neg = 1 x_sec_neg_patch = x_ref_patch - 
disp + dataset_neg y_sec_neg_patch = 5 gt_sec_neg_patch = self.sec_img_0[ y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1, x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1, ] gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0) # Check if the calculated patch is equal to the ground truth (same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path) # negative disparity patch = training_loader.__getitem__(2) x_ref_patch = 5 y_ref_patch = 7 patch_size = 5 gt_ref_patch = self.ref_img_1[ y_ref_patch - patch_size : y_ref_patch + patch_size + 1, x_ref_patch - patch_size : x_ref_patch + patch_size + 1, ] # disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) disp = -1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch = 7 gt_sec_pos_patch = self.sec_img_2[ y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1, x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1, ] # dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) dataset_neg = 1 x_sec_neg_patch = x_ref_patch - disp + dataset_neg y_sec_neg_patch = 7 gt_sec_neg_patch = self.sec_img_2[ y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1, x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1, ] gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0) # Check if the calculated patch is equal to the ground truth (same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path) if __name__ == "__main__": unittest.main()
40.918762
116
0.597636
3,049
21,155
3.878649
0.090521
0.039574
0.056824
0.030441
0.869948
0.8494
0.83198
0.825216
0.807374
0.789024
0
0.042419
0.277901
21,155
516
117
40.998062
0.731736
0.256535
0
0.668103
0
0
0.037748
0.007612
0
0
0
0
0.043103
1
0.043103
false
0
0.034483
0
0.086207
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
5966d63840b7ab8028ae290541bcf4513c66c13a
211
py
Python
tests/conftest.py
Agilicus/copper-sdk
dfdecd4aa76bdd47661fdd4bfada7781f8eae835
[ "MIT" ]
4
2021-01-03T07:40:01.000Z
2021-09-03T09:21:02.000Z
tests/conftest.py
Agilicus/copper-sdk
dfdecd4aa76bdd47661fdd4bfada7781f8eae835
[ "MIT" ]
5
2020-09-03T17:28:13.000Z
2021-10-04T22:47:23.000Z
tests/conftest.py
Agilicus/copper-sdk
dfdecd4aa76bdd47661fdd4bfada7781f8eae835
[ "MIT" ]
4
2021-01-07T05:30:49.000Z
2021-09-13T08:08:54.000Z
import pytest from copper_sdk import COPPER_API_TOKEN, COPPER_API_EMAIL from copper_sdk.copper import Copper @pytest.fixture(scope='session') def copper(): return Copper(COPPER_API_TOKEN, COPPER_API_EMAIL)
26.375
57
0.824645
32
211
5.125
0.40625
0.219512
0.158537
0.243902
0.341463
0.341463
0
0
0
0
0
0
0.104265
211
7
58
30.142857
0.867725
0
0
0
0
0
0.033175
0
0
0
0
0
0
1
0.166667
true
0
0.5
0.166667
0.833333
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
1
0
0
0
6
59a2b7f390bd55399c067382bffd0b88305d91cf
3,770
py
Python
pirates/leveleditor/worldData/CaveBTemplate.py
itsyaboyrocket/pirates
6ca1e7d571c670b0d976f65e608235707b5737e3
[ "BSD-3-Clause" ]
3
2021-02-25T06:38:13.000Z
2022-03-22T07:00:15.000Z
pirates/leveleditor/worldData/CaveBTemplate.py
itsyaboyrocket/pirates
6ca1e7d571c670b0d976f65e608235707b5737e3
[ "BSD-3-Clause" ]
null
null
null
pirates/leveleditor/worldData/CaveBTemplate.py
itsyaboyrocket/pirates
6ca1e7d571c670b0d976f65e608235707b5737e3
[ "BSD-3-Clause" ]
1
2021-02-25T06:38:17.000Z
2021-02-25T06:38:17.000Z
# uncompyle6 version 3.2.0 # Python bytecode 2.4 (62061) # Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)] # Embedded file name: pirates.leveleditor.worldData.CaveBTemplate from pandac.PandaModules import Point3, VBase3 objectStruct = {'Objects': {'1172185213.66sdnaik': {'Type': 'Island Game Area', 'Name': 'CaveBTemplate', 'File': '', 'Instanced': True, 'Objects': {'1172185301.05sdnaik': {'Type': 'Locator Node', 'Name': 'portal_interior_1', 'Hpr': VBase3(-92.814, 0.0, 0.0), 'Pos': Point3(408.102, 203.835, 1.938), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1172185301.08sdnaik': {'Type': 'Locator Node', 'Name': 'portal_interior_2', 'Hpr': VBase3(-0.234, -0.244, 0.739), 'Pos': Point3(-535.085, 236.444, 77.638), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1172893180.14kmuller': {'Type': 'Tunnel Cap', 'Hpr': VBase3(-89.933, 0.0, 0.0), 'Pos': Point3(-530.764, 233.107, 82.679), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/tunnels/tunnelcap_cave_interior'}}, '1172893192.18kmuller': {'Type': 'Tunnel Cap', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-476.043, 262.701, 122.229), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/tunnels/tunnelcap_cave_interior'}}, '1172893216.81kmuller': {'Type': 'Tunnel Cap', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-436.771, 259.368, 146.301), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/tunnels/tunnelcap_cave_interior'}}, '1172893544.75kmuller': {'Type': 'Tunnel Cap', 'Hpr': VBase3(-29.142, 0.38, 0.0), 'Pos': Point3(408.785, 196.489, 3.052), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/tunnels/tunnelcap_cave_interior'}}, '1176755520.41dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '120.0000', 'DropOff': '6.8182', 'Flickering': False, 'Hpr': VBase3(-110.238, -3.38, 94.315), 'Intensity': '1.5758', 'LightType': 'SPOT', 'Pos': Point3(-538.19, 242.893, 
99.248), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}, '1176755691.11dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '120.0000', 'DropOff': '2.7273', 'Flickering': False, 'Hpr': VBase3(42.452, 40.037, -92.62), 'Intensity': '1.4545', 'LightType': 'SPOT', 'Pos': Point3(-301.72, -166.094, 66.363), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}, '1176756704.88dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'Flickering': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Intensity': '0.1515', 'LightType': 'AMBIENT', 'Pos': Point3(66.477, -201.119, 35.177), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}}, 'Visual': {'Model': 'models/caves/cave_b_zero'}}}, 'Node Links': [], 'Layers': {}, 'ObjectIds': {'1172185213.66sdnaik': '["Objects"]["1172185213.66sdnaik"]', '1172185301.05sdnaik': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1172185301.05sdnaik"]', '1172185301.08sdnaik': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1172185301.08sdnaik"]', '1172893180.14kmuller': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1172893180.14kmuller"]', '1172893192.18kmuller': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1172893192.18kmuller"]', '1172893216.81kmuller': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1172893216.81kmuller"]', '1172893544.75kmuller': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1172893544.75kmuller"]', '1176755520.41dzlu': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1176755520.41dzlu"]', '1176755691.11dzlu': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1176755691.11dzlu"]', '1176756704.88dzlu': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1176756704.88dzlu"]'}}
628.333333
3,495
0.659416
513
3,770
4.807018
0.352827
0.017843
0.019465
0.019465
0.360097
0.301298
0.248175
0.231955
0.212084
0.202758
0
0.270348
0.077719
3,770
6
3,495
628.333333
0.438884
0.05809
0
0
0
0
0.564421
0.254863
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
59b9ab7aa44e85318936fa3ee35b19c5aa8d531f
824
py
Python
qork/easy.py
flipcoder/qork
86f10f0db2edc82786516fd30bbd9f046b1a27aa
[ "MIT" ]
3
2020-03-19T06:31:32.000Z
2021-08-24T19:19:50.000Z
qork/easy.py
flipcoder/qork
86f10f0db2edc82786516fd30bbd9f046b1a27aa
[ "MIT" ]
null
null
null
qork/easy.py
flipcoder/qork
86f10f0db2edc82786516fd30bbd9f046b1a27aa
[ "MIT" ]
null
null
null
#!/usr/bin/python from collections import defaultdict from qork.signal import Signal from qork.reactive import * APP = None def qork_app(a=None): global APP if a is None: return APP APP = a return APP def cache(*args, **kwargs): return APP.cache(*args, **kwargs) def add(*args, **kwargs): return APP.add(*args, **kwargs) def find(*args, **kwargs): return APP.world.find(*args, **kwargs) def find_one(*args, **kwargs): return APP.world.find(*args, one=True, **kwargs) def remove(*args, **kwargs): return APP.remove(*args, **kwargs) def create(*args, **kwargs): return APP.create(*args, **kwargs) def clear(): return APP.scene.clear() def play(*args, **kwargs): return APP.play(*args, **kwargs) # def music(fn): # return APP.add(fn, loop=True)
15.846154
52
0.634709
118
824
4.415254
0.288136
0.24952
0.214971
0.255278
0.122841
0.122841
0.122841
0
0
0
0
0
0.20267
824
51
53
16.156863
0.792998
0.078884
0
0.076923
0
0
0
0
0
0
0
0
0
1
0.346154
false
0
0.115385
0.307692
0.846154
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
ab7f0aaf5c29c103ff087961487efc01b174671f
82
py
Python
paths_cli/__init__.py
dwhswenson/openpathsampling-cli
9d20fb069b08ea516174607fe4464fb5f9a74b12
[ "MIT" ]
1
2020-02-11T13:31:53.000Z
2020-02-11T13:31:53.000Z
paths_cli/__init__.py
dwhswenson/openpathsampling-cli
9d20fb069b08ea516174607fe4464fb5f9a74b12
[ "MIT" ]
null
null
null
paths_cli/__init__.py
dwhswenson/openpathsampling-cli
9d20fb069b08ea516174607fe4464fb5f9a74b12
[ "MIT" ]
null
null
null
from .cli import OpenPathSamplingCLI from . import commands from . import version
20.5
36
0.817073
10
82
6.7
0.6
0.298507
0
0
0
0
0
0
0
0
0
0
0.146341
82
3
37
27.333333
0.957143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ab902eebf9ec437998e96a8a77820cb0063c7fc1
35
py
Python
piwebasync/websockets/__init__.py
newvicx/piwebasync
fc0d159aa4b99667777f428a090fe7a102481fea
[ "MIT" ]
null
null
null
piwebasync/websockets/__init__.py
newvicx/piwebasync
fc0d159aa4b99667777f428a090fe7a102481fea
[ "MIT" ]
2
2022-03-02T17:42:21.000Z
2022-03-29T19:24:01.000Z
piwebasync/websockets/__init__.py
newvicx/piwebasync
fc0d159aa4b99667777f428a090fe7a102481fea
[ "MIT" ]
null
null
null
from .client import WebsocketClient
35
35
0.885714
4
35
7.75
1
0
0
0
0
0
0
0
0
0
0
0
0.085714
35
1
35
35
0.96875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
abb9f37a6fa130250d38acc4dde44fb5a49531ff
131
py
Python
orb_simulator/orbsim_language/orbsim_ast/bitwise_shift_right_node.py
dmguezjaviersnet/IA-Sim-Comp-Project
8165b9546efc45f98091a3774e2dae4f45942048
[ "MIT" ]
1
2022-01-19T22:49:09.000Z
2022-01-19T22:49:09.000Z
orb_simulator/orbsim_language/orbsim_ast/bitwise_shift_right_node.py
dmguezjaviersnet/IA-Sim-Comp-Project
8165b9546efc45f98091a3774e2dae4f45942048
[ "MIT" ]
15
2021-11-10T14:25:02.000Z
2022-02-12T19:17:11.000Z
orb_simulator/orbsim_language/orbsim_ast/bitwise_shift_right_node.py
dmguezjaviersnet/IA-Sim-Comp-Project
8165b9546efc45f98091a3774e2dae4f45942048
[ "MIT" ]
null
null
null
from orbsim_language.orbsim_ast.binary_expr_node import BinaryExprNode # >> class BitwiseShiftRightNode(BinaryExprNode): pass
21.833333
70
0.832061
14
131
7.5
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.10687
131
5
71
26.2
0.897436
0.015267
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
abce9487f37e68f9114d032ce10077abc1f38211
29
py
Python
library/source1/mdl/v44/__init__.py
anderlli0053/SourceIO
3c0c4839939ce698439987ac52154f89ee2f5341
[ "MIT" ]
null
null
null
library/source1/mdl/v44/__init__.py
anderlli0053/SourceIO
3c0c4839939ce698439987ac52154f89ee2f5341
[ "MIT" ]
null
null
null
library/source1/mdl/v44/__init__.py
anderlli0053/SourceIO
3c0c4839939ce698439987ac52154f89ee2f5341
[ "MIT" ]
null
null
null
from .mdl_file import MdlV44
14.5
28
0.827586
5
29
4.6
1
0
0
0
0
0
0
0
0
0
0
0.08
0.137931
29
1
29
29
0.84
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
abd455a86a3ffc917698b585fae0d39293200f58
40
py
Python
problems/DivisionTwo_Practice/solution.py
Pactionly/SpringComp2019
f684b84375b0d1fe98f3dffac6d2fac26ba6e2f1
[ "MIT" ]
1
2020-04-21T00:42:47.000Z
2020-04-21T00:42:47.000Z
problems/DivisionTwo_Practice/solution.py
Pactionly/SpringComp2019
f684b84375b0d1fe98f3dffac6d2fac26ba6e2f1
[ "MIT" ]
null
null
null
problems/DivisionTwo_Practice/solution.py
Pactionly/SpringComp2019
f684b84375b0d1fe98f3dffac6d2fac26ba6e2f1
[ "MIT" ]
1
2020-04-23T02:09:45.000Z
2020-04-23T02:09:45.000Z
print("Hello World from Division Two!")
20
39
0.75
6
40
5
1
0
0
0
0
0
0
0
0
0
0
0
0.125
40
1
40
40
0.857143
0
0
0
0
0
0.75
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
abe2175d966ffb568f2530920d141df8286c646b
10,354
py
Python
sjb/hack/determine_install_upgrade_version_test.py
brenton/aos-cd-jobs
34e427bb7091c52791bc93a34f062e57dc005082
[ "Apache-2.0" ]
45
2017-05-09T15:49:06.000Z
2021-11-07T19:48:35.000Z
sjb/hack/determine_install_upgrade_version_test.py
brenton/aos-cd-jobs
34e427bb7091c52791bc93a34f062e57dc005082
[ "Apache-2.0" ]
1,313
2017-01-19T13:40:43.000Z
2022-03-30T14:25:44.000Z
sjb/hack/determine_install_upgrade_version_test.py
brenton/aos-cd-jobs
34e427bb7091c52791bc93a34f062e57dc005082
[ "Apache-2.0" ]
165
2017-01-17T22:19:04.000Z
2022-03-02T12:15:13.000Z
import unittest from determine_install_upgrade_version import * class TestPackage(object): def __init__(self, name, version, release, epoch, vra, pkgtup): self.name = name self.version = version self.release = release self.epoch = epoch self.vra = vra self.pkgtup = pkgtup def __eq__(self, other): return self.__dict__ == other.__dict__ @classmethod def create_test_packages(self, test_pkgs): test_pkgs_objs = [] for pkg in test_pkgs: pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_arch = rpmutils.splitFilename(pkg) pkg_vra = pkg_version + "-" + pkg_release + "." + pkg_arch pkg_tup = (pkg_name , pkg_arch, pkg_epoch, pkg_version, pkg_release) test_pkgs_objs.append(TestPackage(pkg_name, pkg_version, pkg_release, pkg_epoch, pkg_vra, pkg_tup)) return test_pkgs_objs class RemoveDuplicatePackages(unittest.TestCase): "Test for `determine_install_upgrade_version.py`" def test_removing_single_duplicate_package(self): """ when is multiple duplicate packages, return only one """ test_pkgs = ["origin-1.4.1-1.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) def test_removing_no_duplicate_package(self): """ when there is no duplicate package, return the single one """ test_pkgs = ["origin-1.4.1-1.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) result_pkgs_objs = test_pkgs_objs[:2] self.assertEqual(remove_duplicate_pkgs(test_pkgs_objs), result_pkgs_objs) class GetMatchingVersionTestCase(unittest.TestCase): "Test for `determine_install_upgrade_version.py`" def test_get_matching_versions(self): """ when only one matching version exist and its pre-release, it is returned """ test_pkgs = ["origin-1.4.1-1.el7.x86_64", "origin-1.5.0-0.4.el7.x86_64"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) 
self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_single_pre_release(self): """ when only one pre-release version exist, it is returned """ test_pkgs = ["origin-1.5.0-0.4.el7.x86_64"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7']) def test_with_multiple_pre_release(self): """ when only one pre-release version exist, it is returned """ test_pkgs = ["origin-1.5.0-0.4.el7.x86_64", "origin-1.5.2-0.1.el7.x86_64"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ['1.5.0-0.4.el7', '1.5.2-0.1.el7']) def test_with_single_release(self): """ when both release and pre-release versions exist, only release versions are returned """ test_pkgs = ["origin-1.5.0-0.4.el7.x86_64", "origin-1.5.0-1.1.el7.x86_64"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ["1.5.0-1.1.el7"]) def test_with_muptiple_release(self): """ when both release and pre-release versions exist, only release version is returned """ test_pkgs = ["origin-1.5.0-0.4.el7.x86_64", "origin-1.5.0-1.1.el7.x86_64", "origin-1.5.2-1.1.el7.x86_64"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertEqual(get_matching_versions('origin', test_pkgs_objs, '1.5'), ["1.5.0-1.1.el7", "1.5.2-1.1.el7"]) def test_with_no_matches(self): test_pkgs = ["origin-1.2.0-0.4.el7.x86_64", "origin-1.3.0-1.1.el7.x86_64", "origin-1.4.2-1.1.el7.x86_64"] test_pkgs_objs = TestPackage.create_test_packages(test_pkgs) self.assertRaises(SystemExit, get_matching_versions, 'origin', test_pkgs_objs, '1.5') class DetermineSearchVersionTestCase(unittest.TestCase): "Test for `determine_install_upgrade_version.py`" def test_origin_with_standard_versioning_schema(self): """ when the origin version is higher then the 
first version of the new origin versioning schema - origin-3.6 """ self.assertEqual(determine_search_versions("origin", "3.7.0"), ("3.6", "3.7")) def test_origin_with_short_standard_versioning_schema(self): """ when the origin version is in short format and higher then the first version of the new origin versioning schema - origin-3.6 """ self.assertEqual(determine_search_versions("origin", "3.7"), ("3.6", "3.7")) def test_origin_with_standard_to_legacy_versioning_schema(self): """ when the origin version is the first from the new origin versioning schema - origin-3.6 """ self.assertEqual(determine_search_versions("origin", "3.6.0"), ("1.5", "3.6")) def test_origin_with_short_standard_to_legacy_versioning_schema(self): """ when the origin version is in short format and first from the new origin versioning schema - origin-3.6 """ self.assertEqual(determine_search_versions("origin", "3.6"), ("1.5", "3.6")) def test_origin_with_legacy_schema(self): """ when the origin version is in the old versioning schema """ self.assertEqual(determine_search_versions("origin", "1.5.0"), ("1.4", "1.5")) def test_origin_with_short_legacy_schema(self): """ when the origin version is in short and old versioning schema """ self.assertEqual(determine_search_versions("origin", "1.5"), ("1.4", "1.5")) def test_openshift_ansible_with_standard_versioning_schema(self): """ when openshift-ansible, which doesnt have different versioning schema, is in 3.7 version """ self.assertEqual(determine_search_versions("openshift-ansible", "3.7.0"), ("3.6", "3.7")) def test_openshift_ansible_with_standard_to_legacy_versioning_schema(self): """ when openshift-ansible, which doesnt have different versioning schema is in 3.6 version """ self.assertEqual(determine_search_versions("openshift-ansible", "3.6.0"), ("3.5", "3.6")) def test_openshift_ansible_with_short_standard_to_legacy_versioning_schema(self): """ when openshift-ansible, which doesnt have different versioning schema, is in short format and in 3.6 
version """ self.assertEqual(determine_search_versions("openshift-ansible", "3.6"), ("3.5", "3.6")) def test_openshift_ansible_with_legacy_versioning_schema(self): """ when openshift-ansible, which doesnt have different versioning schema is in 3.4 version """ self.assertEqual(determine_search_versions("openshift-ansible", "3.5.0"), ("3.4", "3.5")) class SchemaChangeCheckTestCase(unittest.TestCase): "Test for `determine_install_upgrade_version.py`" def test_origin_package_with_new_schema(self): """ when origin package is in 3.6 version """ self.assertEqual(schema_change_check("origin", "3", "6"), "3.6") def test_origin_package_with_old_schema(self): """ when origin package is in 1.5 version """ self.assertEqual(schema_change_check("origin", "3", "5"), "1.5") def test_non_origin_package_with_new_schema(self): """ when origin package is in 3.6 version """ self.assertEqual(schema_change_check("openshift-ansible", "3", "6"), "3.6") def test_non_origin_package_with_old_schema(self): """ when origin package is in 3.5 version """ self.assertEqual(schema_change_check("openshift-ansible", "3", "5"), "3.5") class GetLastVersionTestCase(unittest.TestCase): "Test for `determine_install_upgrade_version.py`" def test_with_multiple_matching_release_versions(self): """ when multiple matching version are present in released versions """ matching_versions = ["1.2.0-1.el7", "1.2.2-1.el7", "1.2.5-1.el7"] install_version = "1.2.5-1.el7" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_release_version(self): """ when only a single matching version is present in released versions """ matching_versions = ["1.5.0-1.4.el7"] install_version = "1.5.0-1.4.el7" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_multiple_matching_pre_release_versions(self): """ when multiple matching pre-release version are present in pre-released versions """ matching_versions = ["1.2.0-0.el7", "1.2.2-0.el7", "1.2.5-0.el7"] 
install_version = "1.2.5-0.el7" self.assertEqual(get_last_version(matching_versions), install_version) def test_with_single_matching_pre_release_version(self): """ when only single matching pre-release version is present in pre-released versions """ matching_versions = ["1.5.0-0.4.el7"] install_version = "1.5.0-0.4.el7" self.assertEqual(get_last_version(matching_versions), install_version) class SortPackagesTestCase(unittest.TestCase): "Test for `determine_install_upgrade_version.py`" def test_sort_packages_with_exceptional_origin_pkg(self): """ when sorting origin packages with exceptional origin-3.6.0-0.0.alpha.0.1 package """ test_pkgs = ["origin-3.6.0-0.0.alpha.0.1.el7", "origin-3.6.0-0.alpha.0.2.el7"] properly_sorted_pkgs = ["origin-3.6.0-0.alpha.0.2.el7"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_same_minor_version(self): """ when sorting origin packages within the same minor version """ test_pkgs = ["origin-1.5.1-1.el7", "origin-1.5.0-1.el7"] properly_sorted_pkgs = ["origin-1.5.0-1.el7", "origin-1.5.1-1.el7"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) def test_sort_packages_with_different_minor_version(self): """ when sorting origin packages with different minor version """ test_pkgs = ["origin-1.5.1-1.el7", "origin-1.4.0-1.el7"] properly_sorted_pkgs = ["origin-1.4.0-1.el7", "origin-1.5.1-1.el7"] test_pkgs_obj = TestPackage.create_test_packages(test_pkgs) properly_sorted_pkgs_obj = TestPackage.create_test_packages(properly_sorted_pkgs) sorted_test_pkgs_obj = sort_pkgs(test_pkgs_obj) 
self.assertEqual(sorted_test_pkgs_obj, properly_sorted_pkgs_obj) if __name__ == '__main__': unittest.main()
51.257426
135
0.758934
1,662
10,354
4.443442
0.075812
0.061747
0.008937
0.011374
0.845904
0.811239
0.768179
0.736899
0.693433
0.60826
0
0.050999
0.106143
10,354
202
136
51.257426
0.746948
0.225903
0
0.283582
0
0
0.191665
0.097615
0
0
0
0
0.216418
1
0.238806
false
0
0.014925
0.007463
0.320896
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
abef62196bfa039670d1b1408e40569773e9a263
38,079
py
Python
instances/passenger_demand/pas-20210421-2109-int16e/45.py
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
[ "BSD-3-Clause" ]
null
null
null
instances/passenger_demand/pas-20210421-2109-int16e/45.py
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
[ "BSD-3-Clause" ]
null
null
null
instances/passenger_demand/pas-20210421-2109-int16e/45.py
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
[ "BSD-3-Clause" ]
null
null
null
""" PASSENGERS """ numPassengers = 3619 passenger_arriving = ( (4, 8, 8, 4, 6, 0, 7, 6, 4, 7, 5, 0), # 0 (8, 5, 5, 1, 0, 0, 5, 9, 6, 2, 4, 0), # 1 (4, 14, 10, 6, 1, 0, 8, 9, 5, 4, 0, 0), # 2 (8, 9, 4, 4, 3, 0, 7, 13, 8, 2, 2, 0), # 3 (1, 17, 7, 2, 0, 0, 13, 10, 1, 5, 2, 0), # 4 (4, 8, 7, 6, 3, 0, 7, 5, 6, 6, 1, 0), # 5 (1, 7, 7, 4, 2, 0, 5, 7, 7, 6, 1, 0), # 6 (6, 14, 7, 5, 1, 0, 7, 8, 11, 4, 2, 0), # 7 (5, 10, 9, 4, 3, 0, 2, 10, 6, 6, 1, 0), # 8 (2, 12, 8, 2, 2, 0, 14, 10, 5, 9, 2, 0), # 9 (5, 13, 12, 7, 4, 0, 9, 15, 10, 3, 2, 0), # 10 (5, 13, 8, 6, 2, 0, 9, 10, 9, 2, 3, 0), # 11 (4, 6, 5, 3, 3, 0, 7, 8, 7, 8, 0, 0), # 12 (6, 13, 8, 9, 4, 0, 10, 9, 8, 9, 3, 0), # 13 (6, 8, 9, 6, 1, 0, 7, 6, 2, 7, 4, 0), # 14 (3, 9, 8, 8, 3, 0, 8, 7, 5, 7, 3, 0), # 15 (4, 10, 10, 3, 5, 0, 10, 8, 10, 6, 2, 0), # 16 (1, 8, 9, 5, 0, 0, 12, 10, 3, 14, 3, 0), # 17 (5, 8, 6, 3, 2, 0, 7, 9, 9, 8, 0, 0), # 18 (1, 6, 7, 5, 6, 0, 3, 10, 5, 7, 2, 0), # 19 (5, 12, 10, 6, 4, 0, 8, 15, 4, 4, 3, 0), # 20 (6, 8, 9, 4, 2, 0, 7, 10, 4, 5, 0, 0), # 21 (7, 5, 13, 9, 4, 0, 6, 9, 10, 1, 4, 0), # 22 (4, 7, 6, 6, 3, 0, 12, 14, 5, 4, 1, 0), # 23 (4, 14, 13, 3, 2, 0, 13, 8, 7, 4, 7, 0), # 24 (3, 7, 6, 6, 2, 0, 6, 16, 5, 8, 2, 0), # 25 (3, 11, 7, 6, 3, 0, 8, 12, 6, 7, 1, 0), # 26 (7, 9, 9, 0, 1, 0, 4, 5, 6, 6, 1, 0), # 27 (7, 14, 8, 5, 5, 0, 9, 10, 6, 2, 1, 0), # 28 (8, 12, 5, 2, 3, 0, 5, 9, 7, 3, 2, 0), # 29 (4, 9, 7, 3, 5, 0, 15, 13, 7, 3, 2, 0), # 30 (9, 10, 13, 4, 5, 0, 6, 7, 7, 5, 2, 0), # 31 (6, 11, 8, 7, 2, 0, 11, 7, 4, 7, 4, 0), # 32 (7, 14, 10, 1, 4, 0, 4, 5, 6, 8, 1, 0), # 33 (4, 18, 11, 6, 6, 0, 7, 13, 4, 6, 2, 0), # 34 (6, 9, 9, 5, 3, 0, 5, 16, 12, 3, 3, 0), # 35 (7, 8, 12, 3, 3, 0, 9, 12, 8, 2, 4, 0), # 36 (7, 9, 5, 4, 3, 0, 6, 13, 5, 5, 1, 0), # 37 (8, 10, 4, 5, 3, 0, 7, 12, 7, 2, 2, 0), # 38 (5, 6, 8, 8, 3, 0, 3, 6, 10, 4, 2, 0), # 39 (5, 6, 6, 6, 1, 0, 6, 8, 10, 5, 6, 0), # 40 (8, 7, 8, 2, 2, 0, 9, 6, 5, 5, 4, 0), # 41 (10, 10, 6, 1, 1, 0, 10, 7, 8, 4, 2, 0), # 42 (5, 12, 7, 7, 3, 0, 6, 6, 8, 
3, 1, 0), # 43 (2, 9, 5, 3, 4, 0, 2, 7, 8, 4, 4, 0), # 44 (6, 12, 3, 5, 1, 0, 9, 8, 5, 4, 4, 0), # 45 (7, 13, 4, 5, 2, 0, 9, 10, 7, 0, 4, 0), # 46 (10, 10, 6, 3, 3, 0, 7, 14, 5, 4, 3, 0), # 47 (3, 8, 10, 2, 2, 0, 7, 13, 12, 6, 4, 0), # 48 (3, 11, 13, 0, 1, 0, 6, 7, 3, 5, 1, 0), # 49 (5, 13, 8, 5, 1, 0, 6, 10, 5, 6, 2, 0), # 50 (3, 11, 11, 5, 2, 0, 7, 8, 5, 7, 3, 0), # 51 (3, 12, 8, 3, 2, 0, 5, 10, 6, 5, 1, 0), # 52 (5, 14, 10, 5, 4, 0, 7, 7, 7, 6, 1, 0), # 53 (7, 9, 6, 4, 4, 0, 4, 9, 8, 2, 1, 0), # 54 (4, 13, 12, 3, 2, 0, 6, 9, 8, 6, 7, 0), # 55 (2, 10, 10, 5, 2, 0, 6, 8, 5, 5, 2, 0), # 56 (8, 15, 8, 2, 2, 0, 10, 15, 6, 6, 3, 0), # 57 (6, 9, 4, 4, 2, 0, 6, 11, 7, 4, 3, 0), # 58 (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59 ) station_arriving_intensity = ( (4.239442493415277, 10.874337121212122, 12.79077763496144, 10.138043478260869, 11.428846153846154, 7.610869565217392), # 0 (4.27923521607648, 10.995266557940518, 12.859864860039991, 10.194503019323673, 11.51450641025641, 7.608275422705315), # 1 (4.318573563554774, 11.114402244668911, 12.927312196515281, 10.249719806763286, 11.598358974358975, 7.60560193236715), # 2 (4.357424143985952, 11.231615625000002, 12.993070372750644, 10.303646739130434, 11.680326923076926, 7.60284945652174), # 3 (4.395753565505805, 11.346778142536477, 13.057090117109396, 10.356236714975847, 11.760333333333335, 7.600018357487922), # 4 (4.433528436250122, 11.459761240881035, 13.11932215795487, 10.407442632850241, 11.838301282051281, 7.597108997584541), # 5 (4.470715364354698, 11.570436363636365, 13.179717223650389, 10.457217391304349, 11.914153846153846, 7.594121739130435), # 6 (4.507280957955322, 11.678674954405162, 13.238226042559269, 10.50551388888889, 11.987814102564105, 7.591056944444445), # 7 (4.543191825187787, 11.784348456790122, 13.294799343044847, 10.552285024154589, 12.059205128205129, 7.587914975845411), # 8 (4.578414574187884, 11.88732831439394, 13.34938785347044, 10.597483695652175, 12.12825, 7.584696195652175), # 9 
(4.612915813091406, 11.987485970819305, 13.401942302199371, 10.64106280193237, 12.194871794871796, 7.581400966183574), # 10 (4.646662150034143, 12.084692869668913, 13.452413417594972, 10.682975241545895, 12.25899358974359, 7.578029649758455), # 11 (4.679620193151888, 12.178820454545454, 13.500751928020566, 10.723173913043478, 12.320538461538462, 7.574582608695652), # 12 (4.71175655058043, 12.26974016905163, 13.546908561839473, 10.761611714975846, 12.37942948717949, 7.5710602053140095), # 13 (4.743037830455566, 12.357323456790127, 13.590834047415022, 10.798241545893719, 12.435589743589743, 7.567462801932367), # 14 (4.773430640913081, 12.441441761363635, 13.632479113110538, 10.833016304347826, 12.488942307692309, 7.563790760869566), # 15 (4.802901590088772, 12.521966526374861, 13.671794487289347, 10.86588888888889, 12.539410256410257, 7.560044444444445), # 16 (4.831417286118428, 12.598769195426486, 13.708730898314768, 10.896812198067634, 12.586916666666667, 7.556224214975846), # 17 (4.8589443371378405, 12.671721212121213, 13.74323907455013, 10.925739130434785, 12.631384615384619, 7.552330434782609), # 18 (4.8854493512828014, 12.740694020061728, 13.775269744358756, 10.952622584541063, 12.67273717948718, 7.5483634661835755), # 19 (4.910898936689104, 12.805559062850728, 13.804773636103969, 10.9774154589372, 12.710897435897436, 7.544323671497584), # 20 (4.935259701492538, 12.866187784090906, 13.831701478149103, 11.000070652173914, 12.74578846153846, 7.540211413043479), # 21 (4.958498253828894, 12.922451627384962, 13.856003998857469, 11.020541062801932, 12.777333333333331, 7.5360270531400975), # 22 (4.980581201833967, 12.97422203633558, 13.877631926592404, 11.038779589371982, 12.805455128205129, 7.531770954106282), # 23 (5.001475153643547, 13.021370454545455, 13.896535989717222, 11.054739130434783, 12.830076923076923, 7.52744347826087), # 24 (5.0211467173934246, 13.063768325617284, 13.91266691659526, 11.068372584541065, 12.851121794871794, 7.523044987922706), # 25 
(5.039562501219393, 13.101287093153758, 13.925975435589832, 11.079632850241545, 12.86851282051282, 7.518575845410628), # 26 (5.056689113257243, 13.133798200757575, 13.936412275064265, 11.088472826086958, 12.88217307692308, 7.514036413043479), # 27 (5.072493161642767, 13.161173092031426, 13.943928163381893, 11.09484541062802, 12.89202564102564, 7.509427053140097), # 28 (5.086941254511755, 13.183283210578004, 13.948473828906026, 11.09870350241546, 12.89799358974359, 7.504748128019324), # 29 (5.1000000000000005, 13.200000000000001, 13.950000000000001, 11.100000000000001, 12.9, 7.5), # 30 (5.112219245524297, 13.213886079545453, 13.948855917874395, 11.099765849673204, 12.89926985815603, 7.4934020156588375), # 31 (5.124174680306906, 13.227588636363638, 13.945456038647343, 11.099067973856208, 12.897095035460993, 7.483239613526571), # 32 (5.135871675191815, 13.241105965909092, 13.93984891304348, 11.097913235294119, 12.893498936170213, 7.469612293853072), # 33 (5.147315601023018, 13.254436363636366, 13.93208309178744, 11.096308496732028, 12.888504964539008, 7.452619556888223), # 34 (5.158511828644501, 13.267578124999998, 13.922207125603865, 11.094260620915033, 12.882136524822696, 7.432360902881893), # 35 (5.169465728900256, 13.280529545454549, 13.91026956521739, 11.091776470588236, 12.874417021276598, 7.408935832083959), # 36 (5.180182672634271, 13.293288920454547, 13.896318961352657, 11.088862908496733, 12.865369858156027, 7.382443844744294), # 37 (5.190668030690537, 13.305854545454546, 13.8804038647343, 11.08552679738562, 12.855018439716313, 7.352984441112776), # 38 (5.200927173913044, 13.318224715909091, 13.862572826086955, 11.081775, 12.843386170212765, 7.32065712143928), # 39 (5.21096547314578, 13.330397727272729, 13.842874396135267, 11.077614379084968, 12.830496453900707, 7.285561385973679), # 40 (5.220788299232737, 13.342371874999998, 13.821357125603866, 11.073051797385622, 12.816372695035462, 7.247796734965852), # 41 (5.230401023017903, 13.354145454545458, 
13.798069565217393, 11.068094117647059, 12.801038297872342, 7.207462668665667), # 42 (5.239809015345269, 13.365716761363636, 13.773060265700483, 11.06274820261438, 12.784516666666667, 7.164658687323005), # 43 (5.249017647058824, 13.377084090909092, 13.746377777777779, 11.05702091503268, 12.76683120567376, 7.119484291187739), # 44 (5.258032289002557, 13.388245738636364, 13.718070652173916, 11.050919117647059, 12.748005319148938, 7.072038980509745), # 45 (5.266858312020461, 13.399200000000002, 13.688187439613529, 11.044449673202614, 12.72806241134752, 7.022422255538898), # 46 (5.275501086956522, 13.409945170454547, 13.656776690821255, 11.037619444444445, 12.707025886524825, 6.970733616525071), # 47 (5.283965984654732, 13.420479545454548, 13.623886956521739, 11.030435294117646, 12.68491914893617, 6.9170725637181425), # 48 (5.292258375959079, 13.430801420454543, 13.589566787439615, 11.022904084967323, 12.66176560283688, 6.861538597367982), # 49 (5.300383631713555, 13.440909090909088, 13.553864734299518, 11.015032679738564, 12.63758865248227, 6.804231217724471), # 50 (5.308347122762149, 13.450800852272728, 13.516829347826087, 11.006827941176471, 12.612411702127659, 6.7452499250374816), # 51 (5.316154219948849, 13.460475, 13.47850917874396, 10.998296732026144, 12.58625815602837, 6.684694219556889), # 52 (5.3238102941176475, 13.469929829545457, 13.438952777777779, 10.98944591503268, 12.559151418439718, 6.622663601532567), # 53 (5.331320716112533, 13.479163636363635, 13.398208695652173, 10.980282352941177, 12.531114893617023, 6.559257571214393), # 54 (5.338690856777493, 13.488174715909091, 13.356325483091787, 10.970812908496733, 12.502171985815604, 6.494575628852241), # 55 (5.3459260869565215, 13.496961363636363, 13.313351690821257, 10.961044444444445, 12.472346099290782, 6.428717274695986), # 56 (5.353031777493607, 13.505521875000003, 13.269335869565218, 10.950983823529413, 12.441660638297872, 6.361782008995502), # 57 (5.360013299232737, 13.513854545454544, 
13.224326570048309, 10.940637908496733, 12.410139007092198, 6.293869332000667), # 58 (0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59 ) passenger_arriving_acc = ( (4, 8, 8, 4, 6, 0, 7, 6, 4, 7, 5, 0), # 0 (12, 13, 13, 5, 6, 0, 12, 15, 10, 9, 9, 0), # 1 (16, 27, 23, 11, 7, 0, 20, 24, 15, 13, 9, 0), # 2 (24, 36, 27, 15, 10, 0, 27, 37, 23, 15, 11, 0), # 3 (25, 53, 34, 17, 10, 0, 40, 47, 24, 20, 13, 0), # 4 (29, 61, 41, 23, 13, 0, 47, 52, 30, 26, 14, 0), # 5 (30, 68, 48, 27, 15, 0, 52, 59, 37, 32, 15, 0), # 6 (36, 82, 55, 32, 16, 0, 59, 67, 48, 36, 17, 0), # 7 (41, 92, 64, 36, 19, 0, 61, 77, 54, 42, 18, 0), # 8 (43, 104, 72, 38, 21, 0, 75, 87, 59, 51, 20, 0), # 9 (48, 117, 84, 45, 25, 0, 84, 102, 69, 54, 22, 0), # 10 (53, 130, 92, 51, 27, 0, 93, 112, 78, 56, 25, 0), # 11 (57, 136, 97, 54, 30, 0, 100, 120, 85, 64, 25, 0), # 12 (63, 149, 105, 63, 34, 0, 110, 129, 93, 73, 28, 0), # 13 (69, 157, 114, 69, 35, 0, 117, 135, 95, 80, 32, 0), # 14 (72, 166, 122, 77, 38, 0, 125, 142, 100, 87, 35, 0), # 15 (76, 176, 132, 80, 43, 0, 135, 150, 110, 93, 37, 0), # 16 (77, 184, 141, 85, 43, 0, 147, 160, 113, 107, 40, 0), # 17 (82, 192, 147, 88, 45, 0, 154, 169, 122, 115, 40, 0), # 18 (83, 198, 154, 93, 51, 0, 157, 179, 127, 122, 42, 0), # 19 (88, 210, 164, 99, 55, 0, 165, 194, 131, 126, 45, 0), # 20 (94, 218, 173, 103, 57, 0, 172, 204, 135, 131, 45, 0), # 21 (101, 223, 186, 112, 61, 0, 178, 213, 145, 132, 49, 0), # 22 (105, 230, 192, 118, 64, 0, 190, 227, 150, 136, 50, 0), # 23 (109, 244, 205, 121, 66, 0, 203, 235, 157, 140, 57, 0), # 24 (112, 251, 211, 127, 68, 0, 209, 251, 162, 148, 59, 0), # 25 (115, 262, 218, 133, 71, 0, 217, 263, 168, 155, 60, 0), # 26 (122, 271, 227, 133, 72, 0, 221, 268, 174, 161, 61, 0), # 27 (129, 285, 235, 138, 77, 0, 230, 278, 180, 163, 62, 0), # 28 (137, 297, 240, 140, 80, 0, 235, 287, 187, 166, 64, 0), # 29 (141, 306, 247, 143, 85, 0, 250, 300, 194, 169, 66, 0), # 30 (150, 316, 260, 147, 90, 0, 256, 307, 201, 174, 68, 0), # 31 (156, 327, 268, 154, 92, 0, 267, 314, 
205, 181, 72, 0), # 32 (163, 341, 278, 155, 96, 0, 271, 319, 211, 189, 73, 0), # 33 (167, 359, 289, 161, 102, 0, 278, 332, 215, 195, 75, 0), # 34 (173, 368, 298, 166, 105, 0, 283, 348, 227, 198, 78, 0), # 35 (180, 376, 310, 169, 108, 0, 292, 360, 235, 200, 82, 0), # 36 (187, 385, 315, 173, 111, 0, 298, 373, 240, 205, 83, 0), # 37 (195, 395, 319, 178, 114, 0, 305, 385, 247, 207, 85, 0), # 38 (200, 401, 327, 186, 117, 0, 308, 391, 257, 211, 87, 0), # 39 (205, 407, 333, 192, 118, 0, 314, 399, 267, 216, 93, 0), # 40 (213, 414, 341, 194, 120, 0, 323, 405, 272, 221, 97, 0), # 41 (223, 424, 347, 195, 121, 0, 333, 412, 280, 225, 99, 0), # 42 (228, 436, 354, 202, 124, 0, 339, 418, 288, 228, 100, 0), # 43 (230, 445, 359, 205, 128, 0, 341, 425, 296, 232, 104, 0), # 44 (236, 457, 362, 210, 129, 0, 350, 433, 301, 236, 108, 0), # 45 (243, 470, 366, 215, 131, 0, 359, 443, 308, 236, 112, 0), # 46 (253, 480, 372, 218, 134, 0, 366, 457, 313, 240, 115, 0), # 47 (256, 488, 382, 220, 136, 0, 373, 470, 325, 246, 119, 0), # 48 (259, 499, 395, 220, 137, 0, 379, 477, 328, 251, 120, 0), # 49 (264, 512, 403, 225, 138, 0, 385, 487, 333, 257, 122, 0), # 50 (267, 523, 414, 230, 140, 0, 392, 495, 338, 264, 125, 0), # 51 (270, 535, 422, 233, 142, 0, 397, 505, 344, 269, 126, 0), # 52 (275, 549, 432, 238, 146, 0, 404, 512, 351, 275, 127, 0), # 53 (282, 558, 438, 242, 150, 0, 408, 521, 359, 277, 128, 0), # 54 (286, 571, 450, 245, 152, 0, 414, 530, 367, 283, 135, 0), # 55 (288, 581, 460, 250, 154, 0, 420, 538, 372, 288, 137, 0), # 56 (296, 596, 468, 252, 156, 0, 430, 553, 378, 294, 140, 0), # 57 (302, 605, 472, 256, 158, 0, 436, 564, 385, 298, 143, 0), # 58 (302, 605, 472, 256, 158, 0, 436, 564, 385, 298, 143, 0), # 59 ) passenger_arriving_rate = ( (4.239442493415277, 8.699469696969697, 7.674466580976864, 4.055217391304347, 2.2857692307692306, 0.0, 7.610869565217392, 9.143076923076922, 6.082826086956521, 5.1163110539845755, 2.174867424242424, 0.0), # 0 (4.27923521607648, 8.796213246352414, 
7.715918916023995, 4.077801207729468, 2.3029012820512818, 0.0, 7.608275422705315, 9.211605128205127, 6.116701811594203, 5.1439459440159965, 2.1990533115881035, 0.0), # 1 (4.318573563554774, 8.891521795735128, 7.7563873179091685, 4.099887922705314, 2.3196717948717946, 0.0, 7.60560193236715, 9.278687179487179, 6.1498318840579715, 5.170924878606112, 2.222880448933782, 0.0), # 2 (4.357424143985952, 8.9852925, 7.795842223650386, 4.121458695652173, 2.336065384615385, 0.0, 7.60284945652174, 9.34426153846154, 6.18218804347826, 5.197228149100257, 2.246323125, 0.0), # 3 (4.395753565505805, 9.07742251402918, 7.834254070265637, 4.142494685990338, 2.352066666666667, 0.0, 7.600018357487922, 9.408266666666668, 6.213742028985508, 5.222836046843758, 2.269355628507295, 0.0), # 4 (4.433528436250122, 9.167808992704828, 7.8715932947729215, 4.1629770531400965, 2.367660256410256, 0.0, 7.597108997584541, 9.470641025641024, 6.244465579710145, 5.247728863181948, 2.291952248176207, 0.0), # 5 (4.470715364354698, 9.25634909090909, 7.907830334190233, 4.182886956521739, 2.382830769230769, 0.0, 7.594121739130435, 9.531323076923076, 6.274330434782609, 5.271886889460156, 2.3140872727272725, 0.0), # 6 (4.507280957955322, 9.34293996352413, 7.942935625535561, 4.2022055555555555, 2.397562820512821, 0.0, 7.591056944444445, 9.590251282051284, 6.303308333333334, 5.295290417023708, 2.3357349908810323, 0.0), # 7 (4.543191825187787, 9.427478765432097, 7.976879605826908, 4.220914009661835, 2.4118410256410256, 0.0, 7.587914975845411, 9.647364102564103, 6.3313710144927535, 5.317919737217938, 2.3568696913580243, 0.0), # 8 (4.578414574187884, 9.509862651515151, 8.009632712082263, 4.23899347826087, 2.4256499999999996, 0.0, 7.584696195652175, 9.702599999999999, 6.358490217391305, 5.339755141388175, 2.377465662878788, 0.0), # 9 (4.612915813091406, 9.589988776655444, 8.041165381319622, 4.256425120772947, 2.438974358974359, 0.0, 7.581400966183574, 9.755897435897436, 6.384637681159421, 5.360776920879748, 
2.397497194163861, 0.0), # 10 (4.646662150034143, 9.66775429573513, 8.071448050556983, 4.273190096618357, 2.4517987179487175, 0.0, 7.578029649758455, 9.80719487179487, 6.409785144927537, 5.380965367037988, 2.4169385739337823, 0.0), # 11 (4.679620193151888, 9.743056363636363, 8.100451156812339, 4.289269565217391, 2.4641076923076923, 0.0, 7.574582608695652, 9.85643076923077, 6.433904347826087, 5.400300771208226, 2.4357640909090907, 0.0), # 12 (4.71175655058043, 9.815792135241303, 8.128145137103683, 4.304644685990338, 2.475885897435898, 0.0, 7.5710602053140095, 9.903543589743592, 6.456967028985507, 5.418763424735789, 2.4539480338103257, 0.0), # 13 (4.743037830455566, 9.8858587654321, 8.154500428449014, 4.3192966183574875, 2.4871179487179482, 0.0, 7.567462801932367, 9.948471794871793, 6.478944927536231, 5.4363336189660085, 2.471464691358025, 0.0), # 14 (4.773430640913081, 9.953153409090907, 8.179487467866322, 4.33320652173913, 2.4977884615384616, 0.0, 7.563790760869566, 9.991153846153846, 6.499809782608695, 5.452991645244214, 2.488288352272727, 0.0), # 15 (4.802901590088772, 10.017573221099887, 8.203076692373608, 4.346355555555555, 2.507882051282051, 0.0, 7.560044444444445, 10.031528205128204, 6.519533333333333, 5.468717794915738, 2.504393305274972, 0.0), # 16 (4.831417286118428, 10.079015356341188, 8.22523853898886, 4.358724879227053, 2.517383333333333, 0.0, 7.556224214975846, 10.069533333333332, 6.538087318840581, 5.483492359325907, 2.519753839085297, 0.0), # 17 (4.8589443371378405, 10.13737696969697, 8.245943444730077, 4.370295652173914, 2.5262769230769235, 0.0, 7.552330434782609, 10.105107692307694, 6.55544347826087, 5.4972956298200515, 2.5343442424242424, 0.0), # 18 (4.8854493512828014, 10.192555216049382, 8.265161846615253, 4.381049033816424, 2.534547435897436, 0.0, 7.5483634661835755, 10.138189743589743, 6.571573550724637, 5.510107897743501, 2.5481388040123454, 0.0), # 19 (4.910898936689104, 10.244447250280581, 8.282864181662381, 4.3909661835748794, 
2.542179487179487, 0.0, 7.544323671497584, 10.168717948717948, 6.58644927536232, 5.5219094544415865, 2.5611118125701453, 0.0), # 20 (4.935259701492538, 10.292950227272724, 8.299020886889462, 4.400028260869565, 2.5491576923076917, 0.0, 7.540211413043479, 10.196630769230767, 6.600042391304348, 5.53268059125964, 2.573237556818181, 0.0), # 21 (4.958498253828894, 10.337961301907969, 8.313602399314481, 4.408216425120773, 2.555466666666666, 0.0, 7.5360270531400975, 10.221866666666664, 6.6123246376811595, 5.542401599542987, 2.584490325476992, 0.0), # 22 (4.980581201833967, 10.379377629068463, 8.326579155955441, 4.415511835748792, 2.5610910256410255, 0.0, 7.531770954106282, 10.244364102564102, 6.623267753623189, 5.551052770636961, 2.5948444072671157, 0.0), # 23 (5.001475153643547, 10.417096363636363, 8.337921593830332, 4.421895652173912, 2.5660153846153846, 0.0, 7.52744347826087, 10.264061538461538, 6.632843478260869, 5.558614395886888, 2.6042740909090907, 0.0), # 24 (5.0211467173934246, 10.451014660493826, 8.347600149957156, 4.427349033816426, 2.5702243589743587, 0.0, 7.523044987922706, 10.280897435897435, 6.641023550724639, 5.565066766638103, 2.6127536651234564, 0.0), # 25 (5.039562501219393, 10.481029674523006, 8.355585261353898, 4.431853140096617, 2.5737025641025637, 0.0, 7.518575845410628, 10.294810256410255, 6.647779710144927, 5.570390174235932, 2.6202574186307515, 0.0), # 26 (5.056689113257243, 10.507038560606059, 8.361847365038559, 4.435389130434783, 2.5764346153846156, 0.0, 7.514036413043479, 10.305738461538462, 6.653083695652175, 5.574564910025706, 2.6267596401515148, 0.0), # 27 (5.072493161642767, 10.52893847362514, 8.366356898029135, 4.437938164251207, 2.578405128205128, 0.0, 7.509427053140097, 10.313620512820512, 6.656907246376812, 5.5775712653527565, 2.632234618406285, 0.0), # 28 (5.086941254511755, 10.546626568462402, 8.369084297343615, 4.439481400966184, 2.579598717948718, 0.0, 7.504748128019324, 10.318394871794872, 6.659222101449276, 5.57938953156241, 
2.6366566421156006, 0.0), # 29 (5.1000000000000005, 10.56, 8.370000000000001, 4.44, 2.58, 0.0, 7.5, 10.32, 6.660000000000001, 5.58, 2.64, 0.0), # 30 (5.112219245524297, 10.571108863636361, 8.369313550724637, 4.439906339869282, 2.5798539716312057, 0.0, 7.4934020156588375, 10.319415886524823, 6.659859509803923, 5.579542367149758, 2.6427772159090903, 0.0), # 31 (5.124174680306906, 10.582070909090909, 8.367273623188405, 4.439627189542483, 2.5794190070921985, 0.0, 7.483239613526571, 10.317676028368794, 6.659440784313724, 5.578182415458937, 2.6455177272727273, 0.0), # 32 (5.135871675191815, 10.592884772727274, 8.363909347826088, 4.439165294117647, 2.5786997872340423, 0.0, 7.469612293853072, 10.314799148936169, 6.658747941176471, 5.575939565217392, 2.6482211931818185, 0.0), # 33 (5.147315601023018, 10.603549090909091, 8.359249855072465, 4.438523398692811, 2.5777009929078014, 0.0, 7.452619556888223, 10.310803971631206, 6.657785098039217, 5.572833236714976, 2.6508872727272728, 0.0), # 34 (5.158511828644501, 10.614062499999998, 8.353324275362318, 4.437704248366013, 2.576427304964539, 0.0, 7.432360902881893, 10.305709219858157, 6.65655637254902, 5.568882850241546, 2.6535156249999994, 0.0), # 35 (5.169465728900256, 10.624423636363638, 8.346161739130434, 4.436710588235294, 2.5748834042553193, 0.0, 7.408935832083959, 10.299533617021277, 6.655065882352941, 5.564107826086956, 2.6561059090909094, 0.0), # 36 (5.180182672634271, 10.634631136363637, 8.337791376811595, 4.435545163398693, 2.573073971631205, 0.0, 7.382443844744294, 10.29229588652482, 6.65331774509804, 5.558527584541062, 2.6586577840909094, 0.0), # 37 (5.190668030690537, 10.644683636363636, 8.32824231884058, 4.4342107189542475, 2.5710036879432625, 0.0, 7.352984441112776, 10.28401475177305, 6.651316078431372, 5.5521615458937195, 2.661170909090909, 0.0), # 38 (5.200927173913044, 10.654579772727272, 8.317543695652173, 4.43271, 2.568677234042553, 0.0, 7.32065712143928, 10.274708936170212, 6.649065, 5.545029130434782, 
2.663644943181818, 0.0), # 39 (5.21096547314578, 10.664318181818182, 8.305724637681159, 4.431045751633987, 2.566099290780141, 0.0, 7.285561385973679, 10.264397163120565, 6.646568627450981, 5.537149758454106, 2.6660795454545454, 0.0), # 40 (5.220788299232737, 10.673897499999997, 8.29281427536232, 4.429220718954248, 2.563274539007092, 0.0, 7.247796734965852, 10.253098156028368, 6.643831078431373, 5.5285428502415455, 2.6684743749999993, 0.0), # 41 (5.230401023017903, 10.683316363636365, 8.278841739130435, 4.427237647058823, 2.560207659574468, 0.0, 7.207462668665667, 10.240830638297872, 6.640856470588235, 5.519227826086957, 2.6708290909090913, 0.0), # 42 (5.239809015345269, 10.692573409090908, 8.26383615942029, 4.4250992810457515, 2.556903333333333, 0.0, 7.164658687323005, 10.227613333333332, 6.637648921568627, 5.509224106280192, 2.673143352272727, 0.0), # 43 (5.249017647058824, 10.701667272727272, 8.247826666666667, 4.422808366013072, 2.5533662411347517, 0.0, 7.119484291187739, 10.213464964539007, 6.634212549019608, 5.498551111111111, 2.675416818181818, 0.0), # 44 (5.258032289002557, 10.71059659090909, 8.23084239130435, 4.420367647058823, 2.5496010638297872, 0.0, 7.072038980509745, 10.198404255319149, 6.630551470588235, 5.487228260869566, 2.6776491477272724, 0.0), # 45 (5.266858312020461, 10.71936, 8.212912463768117, 4.417779869281045, 2.5456124822695037, 0.0, 7.022422255538898, 10.182449929078015, 6.626669803921568, 5.475274975845411, 2.67984, 0.0), # 46 (5.275501086956522, 10.727956136363636, 8.194066014492753, 4.415047777777778, 2.5414051773049646, 0.0, 6.970733616525071, 10.165620709219858, 6.6225716666666665, 5.462710676328501, 2.681989034090909, 0.0), # 47 (5.283965984654732, 10.736383636363637, 8.174332173913044, 4.412174117647059, 2.536983829787234, 0.0, 6.9170725637181425, 10.147935319148935, 6.618261176470588, 5.449554782608695, 2.6840959090909093, 0.0), # 48 (5.292258375959079, 10.744641136363633, 8.15374007246377, 4.409161633986929, 2.5323531205673757, 
0.0, 6.861538597367982, 10.129412482269503, 6.613742450980394, 5.435826714975845, 2.6861602840909082, 0.0), # 49 (5.300383631713555, 10.752727272727268, 8.13231884057971, 4.406013071895425, 2.527517730496454, 0.0, 6.804231217724471, 10.110070921985816, 6.6090196078431385, 5.421545893719807, 2.688181818181817, 0.0), # 50 (5.308347122762149, 10.760640681818181, 8.110097608695652, 4.4027311764705885, 2.5224823404255314, 0.0, 6.7452499250374816, 10.089929361702126, 6.604096764705883, 5.406731739130435, 2.6901601704545453, 0.0), # 51 (5.316154219948849, 10.768379999999999, 8.087105507246376, 4.399318692810457, 2.517251631205674, 0.0, 6.684694219556889, 10.069006524822695, 6.5989780392156865, 5.391403671497584, 2.6920949999999997, 0.0), # 52 (5.3238102941176475, 10.775943863636364, 8.063371666666667, 4.395778366013072, 2.5118302836879436, 0.0, 6.622663601532567, 10.047321134751774, 6.593667549019608, 5.375581111111111, 2.693985965909091, 0.0), # 53 (5.331320716112533, 10.783330909090907, 8.038925217391304, 4.392112941176471, 2.5062229787234043, 0.0, 6.559257571214393, 10.024891914893617, 6.5881694117647065, 5.359283478260869, 2.6958327272727267, 0.0), # 54 (5.338690856777493, 10.790539772727271, 8.013795289855072, 4.388325163398693, 2.5004343971631204, 0.0, 6.494575628852241, 10.001737588652482, 6.58248774509804, 5.342530193236715, 2.697634943181818, 0.0), # 55 (5.3459260869565215, 10.79756909090909, 7.988011014492754, 4.384417777777777, 2.494469219858156, 0.0, 6.428717274695986, 9.977876879432625, 6.576626666666667, 5.325340676328502, 2.6993922727272723, 0.0), # 56 (5.353031777493607, 10.804417500000001, 7.96160152173913, 4.380393529411765, 2.4883321276595742, 0.0, 6.361782008995502, 9.953328510638297, 6.570590294117648, 5.307734347826087, 2.7011043750000003, 0.0), # 57 (5.360013299232737, 10.811083636363634, 7.934595942028984, 4.376255163398692, 2.4820278014184396, 0.0, 6.293869332000667, 9.928111205673758, 6.564382745098039, 5.289730628019323, 2.7027709090909084, 
0.0), # 58 (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59 ) passenger_allighting_rate = ( (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9 (0, 0.16666666666666666, 
0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 
0.16666666666666666, 0.16666666666666666, 1), # 20 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 
0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41 (0, 
0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 
0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58 (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59 ) """ parameters for reproducibiliy. More information: https://numpy.org/doc/stable/reference/random/parallel.html """ #initial entropy entropy = 258194110137029475889902652135037600173 #index for seed sequence child child_seed_index = ( 1, # 0 44, # 1 )
113.668657
214
0.730455
5,147
38,079
5.401982
0.22926
0.310747
0.246008
0.46612
0.326284
0.325565
0.325565
0.325565
0.325565
0.325565
0
0.820051
0.118543
38,079
334
215
114.008982
0.008312
0.031802
0
0.202532
0
0
0
0
0
0
0
0
0
1
0
false
0.015823
0
0
0
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
f9fc952d1ccf29274ec0973eb4a467f04bcaa21d
146
py
Python
flog/user/__init__.py
mutalisk999/Flog
5d836e26967b39faebdf2d5a2c558316bf93221b
[ "MIT" ]
1
2020-08-24T03:39:52.000Z
2020-08-24T03:39:52.000Z
flog/user/__init__.py
mutalisk999/Flog
5d836e26967b39faebdf2d5a2c558316bf93221b
[ "MIT" ]
null
null
null
flog/user/__init__.py
mutalisk999/Flog
5d836e26967b39faebdf2d5a2c558316bf93221b
[ "MIT" ]
null
null
null
""" MIT License Copyright (c) 2020 Andy Zhou """ from flask import Blueprint user_bp = Blueprint("user", __name__) from . import views
14.6
38
0.678082
19
146
4.947368
0.789474
0.276596
0
0
0
0
0
0
0
0
0
0.035088
0.219178
146
9
39
16.222222
0.789474
0.273973
0
0
0
0
0.044944
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
e6583e674ba2e81a101b4e7e7b28bee80e278c5c
35
py
Python
swf/responses/__init__.py
nstott/simpleflow
483602deb745a09b59ad6e24052dd5096c54fad2
[ "MIT" ]
69
2015-02-24T00:49:40.000Z
2022-02-05T02:35:04.000Z
swf/responses/__init__.py
nstott/simpleflow
483602deb745a09b59ad6e24052dd5096c54fad2
[ "MIT" ]
295
2015-02-06T11:02:00.000Z
2022-03-21T11:01:34.000Z
swf/responses/__init__.py
nstott/simpleflow
483602deb745a09b59ad6e24052dd5096c54fad2
[ "MIT" ]
27
2015-08-31T22:14:42.000Z
2022-02-08T07:25:01.000Z
from .base import Response # NOQA
17.5
34
0.742857
5
35
5.2
1
0
0
0
0
0
0
0
0
0
0
0
0.2
35
1
35
35
0.928571
0.114286
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
0510b47d60ee89fe0f4905a512bdc61e7ec08cb3
2,805
py
Python
ML/code/linear_model.py
DistributedML/Biscotti
dfba71b3924e1bafd2ab2545881fb741193f224e
[ "BSD-2-Clause" ]
61
2019-01-13T22:07:00.000Z
2022-02-16T16:53:13.000Z
ML/code/linear_model.py
cm20210602/Biscotti
dfba71b3924e1bafd2ab2545881fb741193f224e
[ "BSD-2-Clause" ]
null
null
null
ML/code/linear_model.py
cm20210602/Biscotti
dfba71b3924e1bafd2ab2545881fb741193f224e
[ "BSD-2-Clause" ]
14
2019-05-26T15:11:39.000Z
2022-03-02T16:10:24.000Z
from __future__ import division import numpy as np import utils import pdb lammy = 0.1 verbose = 1 maxEvals = 10000 X = 0 y = 0 iteration = 1 alpha = 1e-2 d = 0 hist_grad = 0 def init(dataset): global X X = utils.load_dataset(dataset)['X'] global y y = utils.load_dataset(dataset)['y'] global d d = X.shape[1] global hist_grad hist_grad = np.zeros(d) return d def funObj(ww, X, y): xwy = (X.dot(ww) - y) f = 0.5 * xwy.T.dot(xwy) g = X.T.dot(xwy) return f, g def funObjL2(ww, X, y): xwy = (X.dot(ww) - y) f = 0.5 * xwy.T.dot(xwy) + 0.5 * self.lammy * ww.T.dot(ww) g = X.T.dot(xwy) + self.lammy * ww return f, g # Reports the direct change to w, based on the given one. # Batch size could be 1 for SGD, or 0 for full gradient. def privateFun(theta, ww, batch_size=0): global iteration print 'python iteration ' + str(iteration) + ' starting' ww = np.array(ww) # Define constants and params nn, dd = X.shape threshold = int(d * theta) if batch_size > 0 and batch_size < nn: idx = np.random.choice(nn, batch_size, replace=False) else: # Just take the full range idx = range(nn) f, g = funObj(ww, X[idx, :], y[idx]) # AdaGrad global hist_grad hist_grad += g**2 ada_grad = g / (1e-6 + np.sqrt(hist_grad)) # Determine the actual step magnitude delta = -alpha * ada_grad # Weird way to get NON top k values if theta < 1: param_filter = np.argpartition( abs(delta), -threshold)[:d - threshold] delta[param_filter] = 0 w_new = ww + delta f_new, g_new = funObj(w_new, X[idx, :], y[idx]) print 'python iteration ' + str(iteration) + ' ending' iteration = iteration + 1 return delta def privateFunL2(theta, ww, batch_size=0): global iteration print 'python iteration ' + str(iteration) + ' starting' ww = np.array(ww) # Define constants and params nn, dd = X.shape threshold = int(d * theta) if batch_size > 0 and batch_size < nn: idx = np.random.choice(nn, batch_size, replace=False) else: # Just take the full range idx = range(nn) f, g = funObjL2(ww, X[idx, :], y[idx]) # AdaGrad global hist_grad hist_grad += 
g**2 ada_grad = g / (1e-6 + np.sqrt(hist_grad)) # Determine the actual step magnitude delta = -alpha * ada_grad # Weird way to get NON top k values if theta < 1: param_filter = np.argpartition( abs(delta), -threshold)[:d - threshold] delta[param_filter] = 0 w_new = ww + delta f_new, g_new = funObjL2(w_new, X[idx, :], y[idx]) print 'python iteration ' + str(iteration) + ' ending' iteration = iteration + 1 return delta
21.576923
62
0.596435
434
2,805
3.764977
0.251152
0.044064
0.017136
0.056304
0.734394
0.709914
0.709914
0.709914
0.709914
0.709914
0
0.021923
0.284492
2,805
130
63
21.576923
0.792227
0.13262
0
0.580247
0
0
0.042131
0
0
0
0
0
0
0
null
null
0
0.049383
null
null
0.049383
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
057920a1e2854b49360f4c734ccb7bc39d232c72
98
py
Python
lockbot/controllers/ping.py
preyneyv/iot-door-opener
dc84803e8a853dd4db2fbc8310f16381da9dfffa
[ "MIT" ]
null
null
null
lockbot/controllers/ping.py
preyneyv/iot-door-opener
dc84803e8a853dd4db2fbc8310f16381da9dfffa
[ "MIT" ]
null
null
null
lockbot/controllers/ping.py
preyneyv/iot-door-opener
dc84803e8a853dd4db2fbc8310f16381da9dfffa
[ "MIT" ]
null
null
null
from starlette.responses import PlainTextResponse def ping(_): return PlainTextResponse('')
16.333333
49
0.77551
9
98
8.333333
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.142857
98
5
50
19.6
0.892857
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
553e3c05ff43b750f67e560612f2b93db0cef109
94
py
Python
tests/test_dummy.py
rmflight/GOcats
fc7b367583a5a579a76c58a83a37fe13c69ebccc
[ "Unlicense" ]
10
2017-03-31T19:12:22.000Z
2021-09-28T01:29:38.000Z
tests/test_dummy.py
rmflight/GOcats
fc7b367583a5a579a76c58a83a37fe13c69ebccc
[ "Unlicense" ]
8
2018-04-23T15:40:56.000Z
2021-03-31T14:22:06.000Z
tests/test_dummy.py
rmflight/GOcats
fc7b367583a5a579a76c58a83a37fe13c69ebccc
[ "Unlicense" ]
3
2017-04-23T14:15:41.000Z
2021-06-20T18:38:01.000Z
import pytest def test_run_script(): # run 1 # run 2 # assert assert 1 == 1
10.444444
22
0.553191
14
94
3.571429
0.642857
0
0
0
0
0
0
0
0
0
0
0.066667
0.361702
94
8
23
11.75
0.766667
0.191489
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
55410510df321bd146836470591e53e089a30174
181
py
Python
fjlt/__init__.py
gabobert/fast-jlt
b4f1156fd355ae4dca53b2d661f4bdd5a74fb8fa
[ "MIT" ]
8
2015-09-29T11:41:37.000Z
2022-01-31T17:59:58.000Z
fjlt/__init__.py
gabobert/fast-jlt
b4f1156fd355ae4dca53b2d661f4bdd5a74fb8fa
[ "MIT" ]
1
2017-07-13T11:00:35.000Z
2017-07-14T00:42:32.000Z
fjlt/__init__.py
gabobert/fast-jlt
b4f1156fd355ae4dca53b2d661f4bdd5a74fb8fa
[ "MIT" ]
8
2015-08-14T18:33:38.000Z
2020-02-10T07:56:21.000Z
import os from .version import __version__ def get_include(): ''' Path of cython headers for compiling cython modules ''' return os.path.dirname(os.path.abspath(__file__))
25.857143
63
0.740331
25
181
5
0.72
0.096
0
0
0
0
0
0
0
0
0
0
0.160221
181
6
64
30.166667
0.822368
0.281768
0
0
0
0
0
0
0
0
0
0
0
1
0.25
true
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
55450421321a9d78d6344e925ca710a027620a80
23,700
py
Python
tests/vcf_chunker_test.py
oxfordfun/minos
e7165f1a398b1003e82a8aa00480ef5cd65fa834
[ "MIT" ]
14
2018-01-25T15:20:42.000Z
2022-03-25T07:57:19.000Z
tests/vcf_chunker_test.py
oxfordfun/minos
e7165f1a398b1003e82a8aa00480ef5cd65fa834
[ "MIT" ]
41
2018-01-25T15:47:13.000Z
2021-11-04T10:30:21.000Z
tests/vcf_chunker_test.py
oxfordfun/minos
e7165f1a398b1003e82a8aa00480ef5cd65fa834
[ "MIT" ]
11
2018-01-25T15:11:32.000Z
2021-11-04T08:59:55.000Z
import filecmp import shutil import os import unittest import cluster_vcf_records from minos import vcf_chunker this_dir = os.path.dirname(os.path.abspath(__file__)) data_dir = os.path.join(this_dir, "data", "vcf_chunker") class TestVcfChunker(unittest.TestCase): def test_total_variants_and_alleles_in_vcf_dict(self): """test _total_variants_and_alleles_in_vcf_dict""" class FakeVcf: def __init__(self, alt): self.ALT = alt test_dict = { "chrom1": [FakeVcf("123"), FakeVcf("1"), FakeVcf("123456789")], "chrom2": [FakeVcf("12"), FakeVcf("1234")], } expect_variants = 5 expect_alleles = 24 ( got_variants, got_alleles, ) = vcf_chunker.VcfChunker._total_variants_and_alleles_in_vcf_dict(test_dict) self.assertEqual(expect_variants, got_variants) self.assertEqual(expect_alleles, got_alleles) def test_chunk_end_indexes_from_vcf_record_list(self): """test _chunk_end_indexes_from_vcf_record_list""" record_list = [ cluster_vcf_records.vcf_record.VcfRecord("ref\t1\t.\tA\tG\t.\t.\t.\t."), cluster_vcf_records.vcf_record.VcfRecord( "ref\t2\t.\tC\tT,A,G,TA\t.\t.\t.\t." ), cluster_vcf_records.vcf_record.VcfRecord("ref\t3\t.\tT\tA,C\t.\t.\t.\t."), cluster_vcf_records.vcf_record.VcfRecord( "ref\t5\t.\tAGAGTCACGTA\tG\t.\t.\t.\t." 
), cluster_vcf_records.vcf_record.VcfRecord("ref\t18\t.\tA\tG\t.\t.\t.\t."), cluster_vcf_records.vcf_record.VcfRecord("ref\t21\t.\tG\tT\t.\t.\t.\t."), ] self.assertEqual( (0, 0, 1), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=1 ), ) self.assertEqual( (0, 0, 1), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=2 ), ) self.assertEqual( (0, 0, 1), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=3 ), ) self.assertEqual( (0, 0, 1), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=4 ), ) self.assertEqual( (0, 0, 1), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=5 ), ) self.assertEqual( (0, 0, 1), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=6 ), ) self.assertEqual( (0, 1, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=7 ), ) self.assertEqual( (0, 1, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=8 ), ) self.assertEqual( (0, 1, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=9 ), ) self.assertEqual( (0, 2, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=10 ), ) self.assertEqual( (0, 2, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=11 ), ) self.assertEqual( (0, 3, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_alleles=12 ), ) self.assertEqual( (0, 0, 1), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_sites=1 ), ) self.assertEqual( (0, 1, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, 
total_sites=2 ), ) self.assertEqual( (0, 2, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_sites=3 ), ) self.assertEqual( (0, 3, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_sites=4 ), ) self.assertEqual( (0, 4, 4), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_sites=5 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_sites=6 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_sites=7 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 1, total_sites=8 ), ) self.assertEqual( (0, 0, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 2, total_sites=1 ), ) self.assertEqual( (0, 1, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 2, total_sites=2 ), ) self.assertEqual( (0, 2, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 2, total_sites=3 ), ) self.assertEqual( (0, 3, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 2, total_sites=4 ), ) self.assertEqual( (0, 4, 4), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 2, total_sites=5 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 2, total_sites=6 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 2, total_sites=7 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 2, total_sites=8 ), ) self.assertEqual( (0, 0, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 3, total_sites=1 ), ) 
self.assertEqual( (0, 1, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 3, total_sites=2 ), ) self.assertEqual( (0, 2, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 3, total_sites=3 ), ) self.assertEqual( (0, 3, 4), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 3, total_sites=4 ), ) self.assertEqual( (0, 4, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 3, total_sites=5 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 3, total_sites=6 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 3, total_sites=7 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 3, total_sites=8 ), ) self.assertEqual( (0, 0, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 4, total_sites=1 ), ) self.assertEqual( (0, 1, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 4, total_sites=2 ), ) self.assertEqual( (0, 2, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 4, total_sites=3 ), ) self.assertEqual( (0, 3, 4), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 4, total_sites=4 ), ) self.assertEqual( (0, 4, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 4, total_sites=5 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 4, total_sites=6 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 4, total_sites=7 ), ) self.assertEqual( (0, 1, 2), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 1, 1, total_sites=1 ), ) self.assertEqual( (0, 1, 2), 
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 1, 2, total_sites=1 ), ) self.assertEqual( (0, 1, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 1, 3, total_sites=1 ), ) self.assertEqual( (0, 1, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 1, 15, total_sites=1 ), ) self.assertEqual( (0, 1, 4), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 1, 16, total_sites=1 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 1, 1, total_sites=6 ), ) self.assertEqual( (4, 4, 4), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 4, 1, total_sites=1 ), ) self.assertEqual( (4, 4, 4), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 4, 2, total_sites=1 ), ) self.assertEqual( (3, 4, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 4, 3, total_sites=1 ), ) self.assertEqual( (4, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 4, 1, total_sites=2 ), ) self.assertEqual( (5, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 1, total_sites=1 ), ) self.assertEqual( (5, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 1, total_sites=2 ), ) self.assertEqual( (5, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 2, total_sites=2 ), ) self.assertEqual( (4, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 3, total_sites=2 ), ) self.assertEqual( (4, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 4, total_sites=2 ), ) self.assertEqual( (4, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 5, total_sites=2 ), ) self.assertEqual( (3, 5, 5), 
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 6, total_sites=2 ), ) self.assertEqual( (3, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 7, total_sites=2 ), ) self.assertEqual( (3, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 17, total_sites=2 ), ) self.assertEqual( (2, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 18, total_sites=2 ), ) self.assertEqual( (1, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 19, total_sites=2 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 20, total_sites=2 ), ) self.assertEqual( (0, 5, 5), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 5, 21, total_sites=2 ), ) # These records caused minos error because variant at 800 # was included in the last split file, but the use_end_index was at # position of the variant at 610. So the one at 800 was not getting used. 
record_list = [ cluster_vcf_records.vcf_record.VcfRecord("ref\t75\t.\tA\tG\t.\t.\t.\t."), cluster_vcf_records.vcf_record.VcfRecord("ref\t150\t.\tG\tA,T\t.\t.\t.\t."), cluster_vcf_records.vcf_record.VcfRecord("ref\t450\t.\tT\tC\t.\t.\t.\t."), cluster_vcf_records.vcf_record.VcfRecord("ref\t610\t.\tA\tG\t.\t.\t.\t."), cluster_vcf_records.vcf_record.VcfRecord("ref\t800\t.\tC\tCA\t.\t.\t.\t."), ] self.assertEqual( (0, 1, 1), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 0, 100, total_sites=2 ), ) self.assertEqual( (2, 3, 3), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 2, 100, total_sites=2 ), ) self.assertEqual( (4, 4, 4), vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list( record_list, 4, 100, total_sites=2 ), ) def test_make_split_files(self): """test make_split_files""" infile = os.path.join(data_dir, "make_split_files.in.vcf") tmp_out = "tmp.vcf_chunker.make_split_files" ref_fa = os.path.join(data_dir, "make_split_files.in.ref.fa") if os.path.exists(tmp_out): shutil.rmtree(tmp_out) vcf1 = cluster_vcf_records.vcf_record.VcfRecord( "ref1\t1\t.\tG\tT\t.\tPASS\t.\t.\t." ) vcf2 = cluster_vcf_records.vcf_record.VcfRecord( "ref1\t2\t.\tC\tT\t.\tPASS\t.\t.\t." ) vcf3 = cluster_vcf_records.vcf_record.VcfRecord( "ref1\t3\t.\tT\tA\t.\tPASS\t.\t.\t." ) vcf4 = cluster_vcf_records.vcf_record.VcfRecord( "ref1\t5\t.\tAGAGTCACGTA\tG\t.\tPASS\t.\t.\t." ) vcf5 = cluster_vcf_records.vcf_record.VcfRecord( "ref1\t18\t.\tA\tG\t.\tPASS\t.\t.\t." ) vcf6 = cluster_vcf_records.vcf_record.VcfRecord( "ref1\t21\t.\tG\tT\t.\tPASS\t.\t.\t." ) vcf7 = cluster_vcf_records.vcf_record.VcfRecord( "ref2\t42\t.\tC\tG\t.\tPASS\t.\t.\t." 
) header_lines = [ "##header1", "##header2", "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample_name", ] chunker = vcf_chunker.VcfChunker( tmp_out, vcf_infile=infile, ref_fasta=ref_fa, variants_per_split=2, flank_length=1, gramtools_kmer_size=5, ) chunker.make_split_files() self.assertTrue(os.path.exists(chunker.metadata_pickle)) got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list( os.path.join(tmp_out, "split.0.in.vcf") ) self.assertEqual(header_lines, got_header) self.assertEqual([vcf1, vcf2, vcf3], got_records) got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list( os.path.join(tmp_out, "split.1.in.vcf") ) self.assertEqual(header_lines, got_header) self.assertEqual([vcf2, vcf3, vcf4], got_records) got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list( os.path.join(tmp_out, "split.2.in.vcf") ) self.assertEqual(header_lines, got_header) self.assertEqual([vcf5, vcf6], got_records) got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list( os.path.join(tmp_out, "split.3.in.vcf") ) self.assertEqual(header_lines, got_header) self.assertEqual([vcf7], got_records) self.assertFalse(os.path.exists(os.path.join(tmp_out, "split.4.in.vcf"))) shutil.rmtree(tmp_out) chunker = vcf_chunker.VcfChunker( tmp_out, vcf_infile=infile, ref_fasta=ref_fa, variants_per_split=4, flank_length=3, gramtools_kmer_size=5, ) chunker.make_split_files() self.assertTrue(os.path.exists(chunker.metadata_pickle)) got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list( os.path.join(tmp_out, "split.0.in.vcf") ) self.assertEqual(header_lines, got_header) self.assertEqual([vcf1, vcf2, vcf3, vcf4, vcf5], got_records) got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list( os.path.join(tmp_out, "split.1.in.vcf") ) self.assertEqual(header_lines, got_header) self.assertEqual([vcf4, vcf5, vcf6], got_records) got_header, got_records = 
cluster_vcf_records.vcf_file_read.vcf_file_to_list( os.path.join(tmp_out, "split.2.in.vcf") ) self.assertEqual(header_lines, got_header) self.assertEqual([vcf7], got_records) self.assertFalse(os.path.exists(os.path.join(tmp_out, "split.3.in.vcf"))) chunker2 = vcf_chunker.VcfChunker(tmp_out, gramtools_kmer_size=5) self.assertEqual(chunker.vcf_infile, chunker2.vcf_infile) self.assertEqual(chunker.ref_fasta, chunker2.ref_fasta) self.assertEqual(chunker.variants_per_split, chunker2.variants_per_split) self.assertEqual(chunker.total_splits, chunker2.total_splits) self.assertEqual(chunker.flank_length, chunker2.flank_length) self.assertEqual(chunker.gramtools_kmer_size, chunker2.gramtools_kmer_size) self.assertEqual(chunker.total_split_files, chunker2.total_split_files) self.assertEqual(chunker.vcf_split_files, chunker2.vcf_split_files) shutil.rmtree(tmp_out) def test_make_split_files_2(self): """test make_split_files with different input from previous test""" # These records cause a minos bug. Last record was not being used # when merging because the index was wrong. 
# They are test data from multi_sample_pipeline tests infile = os.path.join(data_dir, "make_split_files2.in.vcf") tmp_out = "tmp.vcf_chunker.make_split_files2" ref_fa = os.path.join(data_dir, "make_split_files2.in.ref.fa") if os.path.exists(tmp_out): shutil.rmtree(tmp_out) chunker = vcf_chunker.VcfChunker( tmp_out, vcf_infile=infile, ref_fasta=ref_fa, variants_per_split=2, flank_length=200, gramtools_kmer_size=5, ) chunker.make_split_files() self.assertTrue(os.path.exists(chunker.metadata_pickle)) chunker2 = vcf_chunker.VcfChunker(tmp_out, gramtools_kmer_size=5) self.assertEqual(1, len(chunker2.vcf_split_files)) self.assertEqual(3, len(chunker2.vcf_split_files["ref.0"])) self.assertEqual(4, chunker2.vcf_split_files["ref.0"][-1].use_end_index) shutil.rmtree(tmp_out) # Test with two threads chunker = vcf_chunker.VcfChunker( tmp_out, vcf_infile=infile, ref_fasta=ref_fa, variants_per_split=2, flank_length=200, threads=2, gramtools_kmer_size=5, ) chunker.make_split_files() self.assertTrue(os.path.exists(chunker.metadata_pickle)) chunker2 = vcf_chunker.VcfChunker(tmp_out, gramtools_kmer_size=5) self.assertEqual(1, len(chunker2.vcf_split_files)) self.assertEqual(3, len(chunker2.vcf_split_files["ref.0"])) self.assertEqual(4, chunker2.vcf_split_files["ref.0"][-1].use_end_index) shutil.rmtree(tmp_out) def test_merge_files(self): """test merge_files""" vcf_to_split = os.path.join(data_dir, "merge_files.in.vcf") ref_fasta = os.path.join(data_dir, "merge_files.in.ref.fa") tmp_outdir = "tmp.vcf_chunker.merge_files" chunker = vcf_chunker.VcfChunker( tmp_outdir, vcf_infile=vcf_to_split, ref_fasta=ref_fasta, variants_per_split=4, flank_length=3, gramtools_kmer_size=5, ) chunker.make_split_files() to_merge = {} for ref, split_list in chunker.vcf_split_files.items(): to_merge[ref] = [x.filename for x in split_list] tmp_vcf_out = "tmp.vcf_chunker.merge_files.out.vcf" chunker.merge_files(to_merge, tmp_vcf_out) self.assertTrue(filecmp.cmp(vcf_to_split, tmp_vcf_out, shallow=False)) 
os.unlink(tmp_vcf_out) shutil.rmtree(tmp_outdir)
35.585586
88
0.567511
2,873
23,700
4.298991
0.068918
0.11497
0.126306
0.109222
0.848838
0.829002
0.809732
0.782447
0.765363
0.738078
0
0.03759
0.333249
23,700
665
89
35.639098
0.74402
0.02384
0
0.573744
0
0.001621
0.048608
0.03874
0
0
0
0
0.171799
1
0.009724
false
0.011345
0.009724
0
0.02269
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
557abe7d3140721c2a79214e8a58388bd9de455d
33
py
Python
grasshopper/__init__.py
aholyoke/grasshopper
b9e11ac3aafdb6e2a61cc8a74ca67e36b690da69
[ "BSD-3-Clause" ]
null
null
null
grasshopper/__init__.py
aholyoke/grasshopper
b9e11ac3aafdb6e2a61cc8a74ca67e36b690da69
[ "BSD-3-Clause" ]
null
null
null
grasshopper/__init__.py
aholyoke/grasshopper
b9e11ac3aafdb6e2a61cc8a74ca67e36b690da69
[ "BSD-3-Clause" ]
null
null
null
from .framework import Framework
16.5
32
0.848485
4
33
7
0.75
0
0
0
0
0
0
0
0
0
0
0
0.121212
33
1
33
33
0.965517
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
55984b7c0e3a37ed751f5916d817fb1f0c1d3de6
313
py
Python
python scripts/detect_winning_team.py
Geoffry-Skionfinschii/Datapack_SurvivalGames
476a7da18e6b5a1eca96fcb7969b63b0c0ddc87f
[ "Apache-2.0" ]
10
2020-05-30T09:08:47.000Z
2022-01-28T07:07:56.000Z
python scripts/detect_winning_team.py
Geoffry-Skionfinschii/Datapack_SurvivalGames
476a7da18e6b5a1eca96fcb7969b63b0c0ddc87f
[ "Apache-2.0" ]
18
2020-05-31T15:16:00.000Z
2022-03-13T13:34:17.000Z
python scripts/detect_winning_team.py
Geoffry-Skionfinschii/Datapack_SurvivalGames
476a7da18e6b5a1eca96fcb7969b63b0c0ddc87f
[ "Apache-2.0" ]
5
2020-04-17T15:07:12.000Z
2020-12-02T01:03:45.000Z
def main(): print("# Generated by python script") for i in range(1, 22): print("execute as @r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}] add Winner".format(i)) print("execute as @r[team=TEAM_{0}, tag=InGame] run scoreboard players add @a[team=TEAM_{0}] Wins 1".format(i)) main()
44.714286
119
0.638978
54
313
3.62963
0.518519
0.163265
0.183673
0.153061
0.367347
0.367347
0.367347
0.367347
0.367347
0.367347
0
0.03125
0.182109
313
7
120
44.714286
0.734375
0
0
0
1
0.333333
0.627389
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.166667
0.5
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
559dba20f272da8e85bd02cd1799c9bf91921491
10,862
py
Python
09_road_limit/model.py
yeodongbin/2020AIChallengeCode
776c686b65a67bc0d71eed1118eed6cf45ea17c6
[ "MIT" ]
null
null
null
09_road_limit/model.py
yeodongbin/2020AIChallengeCode
776c686b65a67bc0d71eed1118eed6cf45ea17c6
[ "MIT" ]
null
null
null
09_road_limit/model.py
yeodongbin/2020AIChallengeCode
776c686b65a67bc0d71eed1118eed6cf45ea17c6
[ "MIT" ]
null
null
null
import torchvision from torchvision.models.detection.faster_rcnn import FastRCNNPredictor from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor from efficientnet_pytorch import EfficientNet from torchvision.models.detection import FasterRCNN from torchvision.models.detection import MaskRCNN from torchvision.models.detection.rpn import AnchorGenerator from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print("fasterrcnn_resnet50_fpn custom call - 41,755,286 (resnet50) / 28,730,006 (resnet18) / 28,730,006 resnet / 22,463,126 / 오잉..light resnet : 22,468,758/ 19,333,398 / custom resent (64 쭉..) 17,664,662") return model def get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print("fasterrcnn_resnet50_fpn custom call - 41,755,286 / ") return model def get_model_instance_segmentation(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), # aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, # sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print("fasterrcnn_resnet50_fpn call - 41,401,661 / 41,532,886") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = 
FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and replace the mask predictor with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation_custom1(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), # aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, # sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print("maskrcnn_resnet50_fpn custom call1 - resnet : 24,743,507 mobilenet : 87,366,291 squeezenet : 33,161,683 densnet : 43,702,739, resnet basicblock 3*3 -> 1*1 : 20,549,203 / basic : 20,543,571 / basicblock con1 : 20,195,411 / 채널 : 강제로 128 지정시 13,033,555 / 128 all 변경 : 9,465,555 ") # 분류를 위한 입력 특징 차원을 얻습니다 in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128 # and replace the mask predictor with a new one model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return model def get_model_instance_segmentation2(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = 
torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 1280 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print("mobilenet_v2 call2 - out_channels :1280, 19,540,921") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and replace the mask predictor with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation4(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model = MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool = mask_roi_pooler ) #print("squeezenet1_0 call2 - out_channels :1280, 18,052,473 / 72M") #print("squeezenet1_0 call2 - out_channels :516, 4,862,777 / 19.5M") 
#print("squeezenet1_1 call2 - out_channels :516, 4,849,849 4,862,777 / 19.5M") print("squeezenet1_1 call2 - out_channels :256, 2,757,369 / 11M (15,000,000 / 15,000,000)") print("squeezenet1_1 call2 - out_channels :512, 4,808,441 / 19.2M (15,000,000)") print("squeezenet1_1 call2 - out_channels :512, 33,192,463 33,161,683 / 172M (15,000,000)") # # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and replace the mask predictor with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation5(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 256 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print("densenet161 call2 - out_channels :256, 28,506,873 / 150M") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and replace the mask predictor with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def 
get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print("get_model_instance_segmentation6 call6 - out_channels :512, 4,808,441 / (15,000,000) ") return model
45.638655
290
0.62981
1,254
10,862
5.208134
0.157895
0.045935
0.033839
0.047772
0.836166
0.812127
0.79314
0.784872
0.784872
0.784872
0
0.076306
0.288161
10,862
237
291
45.831224
0.768365
0.409593
0
0.5
0
0.053191
0.159317
0.019282
0
0
0
0
0
1
0.085106
false
0
0.095745
0
0.265957
0.106383
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
559e2f16b9c8d8149f2ea49ac5c688b029b0d86d
27
py
Python
steamprofile/__init__.py
aaronlyy/steamprofile
43002e62f4924a2a2040a240ed1362c28ad7a8f5
[ "MIT" ]
null
null
null
steamprofile/__init__.py
aaronlyy/steamprofile
43002e62f4924a2a2040a240ed1362c28ad7a8f5
[ "MIT" ]
null
null
null
steamprofile/__init__.py
aaronlyy/steamprofile
43002e62f4924a2a2040a240ed1362c28ad7a8f5
[ "MIT" ]
null
null
null
from .steamprofile import *
27
27
0.814815
3
27
7.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
27
1
27
27
0.916667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e94a4e5cc42d1e471dd1294588d5acc65b8a33e1
12,946
py
Python
tests/test_views.py
yezyilomo/drf-pretty-put
1bc77f5f8fea58b2c30e4e3d7c0837b55b679d59
[ "MIT" ]
28
2019-08-27T14:27:41.000Z
2020-02-04T18:54:18.000Z
tests/test_views.py
yezyilomo/drf-pretty-put
1bc77f5f8fea58b2c30e4e3d7c0837b55b679d59
[ "MIT" ]
3
2019-09-04T10:06:15.000Z
2019-09-06T10:48:42.000Z
tests/test_views.py
yezyilomo/drf-pretty-update
1bc77f5f8fea58b2c30e4e3d7c0837b55b679d59
[ "MIT" ]
null
null
null
from django.urls import reverse from rest_framework.test import APITestCase from tests.testapp.models import Book, Course, Student, Phone class ViewTests(APITestCase): def setUp(self): self.book1 = Book.objects.create(title="Advanced Data Structures", author="S.Mobit") self.book2 = Book.objects.create(title="Basic Data Structures", author="S.Mobit") self.course1 = Course.objects.create( name="Data Structures", code="CS210" ) self.course2 = Course.objects.create( name="Programming", code="CS150" ) self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student = Student.objects.create( name="Yezy", age=24, course=self.course1 ) self.phone1 = Phone.objects.create(number="076711110", type="Office", student=self.student) self.phone2 = Phone.objects.create(number="073008880", type="Home", student=self.student) def tearDown(self): Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() # **************** POST Tests ********************* # def test_post_on_pk_nested_foreignkey_related_field(self): url = reverse("rstudent-list") data = { "name": "yezy", "age": 33, "course": 2 } response = self.client.post(url, data, format="json") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS150', 'books': [ {"title": "Advanced Data Structures", "author": "S.Mobit"} ] }, 'phone_numbers': [] } ) def test_post_on_writable_nested_foreignkey_related_field(self): url = reverse("wstudent-list") data = { "name": "yezy", "age": 33, "course": {"name": "Programming", "code": "CS50"}, } response = self.client.post(url, data, format="json") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [] } ) def test_post_with_add_operation(self): url = reverse("rcourse-list") data = { "name": "Data Structures", "code": "CS310", "books": {"add":[1,2]} } response = self.client.post(url, data, 
format="json") self.assertEqual( response.data, { "name": "Data Structures", "code": "CS310", "books": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] } ) def test_post_with_create_operation(self): data = { "name": "Data Structures", "code": "CS310", "books": {"create": [ {"title": "Linear Math", "author": "Me"}, {"title": "Algebra Three", "author": "Me"} ]} } url = reverse("wcourse-list") response = self.client.post(url, data, format="json") self.assertEqual( response.data, { "name": "Data Structures", "code": "CS310", "books": [ {"title": "Linear Math", "author": "Me"}, {"title": "Algebra Three", "author": "Me"} ] } ) def test_post_on_deep_nested_fields(self): url = reverse("wstudent-list") data = { "name": "yezy", "age": 33, "course": { "name": "Programming", "code": "CS50", "books": {"create": [ {"title": "Python Tricks", "author": "Dan Bader"} ]} } } response = self.client.post(url, data, format="json") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {"title": "Python Tricks", "author": "Dan Bader"} ] }, 'phone_numbers': [] } ) def test_post_on_many_2_one_relation(self): url = reverse("wstudent-list") data = { "name": "yezy", "age": 33, "course": {"name": "Programming", "code": "CS50"}, "phone_numbers": { 'create': [ {'number': '076750000', 'type': 'office'} ] } } response = self.client.post(url, data, format="json") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [ {'number': '076750000', 'type': 'office', 'student': 2} ] } ) # **************** PUT Tests ********************* # def test_put_on_pk_nested_foreignkey_related_field(self): url = reverse("rstudent-detail", args=[self.student.id]) data = { "name": "yezy", "age": 33, "course": 2 } response = self.client.put(url, data, format="json") self.assertEqual( 
response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS150', 'books': [ {"title": "Advanced Data Structures", "author": "S.Mobit"} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_on_writable_nested_foreignkey_related_field(self): url = reverse("wstudent-detail", args=[self.student.id]) data = { "name": "yezy", "age": 33, "course": {"name": "Programming", "code": "CS50"} } response = self.client.put(url, data, format="json") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_with_add_operation(self): url = reverse("rcourse-detail", args=[self.course2.id]) data = { "name": "Data Structures", "code": "CS410", "books": { "add": [2] } } response = self.client.put(url, data, format="json") self.assertEqual( response.data, { "name": "Data Structures", "code": "CS410", "books": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] } ) def test_put_with_remove_operation(self): url = reverse("rcourse-detail", args=[self.course2.id]) data = { "name": "Data Structures", "code": "CS410", "books": { "remove": [1] } } response = self.client.put(url, data, format="json") self.assertEqual( response.data, { "name": "Data Structures", "code": "CS410", "books": [] } ) def test_put_with_create_operation(self): url = reverse("wcourse-detail", args=[self.course2.id]) data = { "name": "Data Structures", "code": "CS310", "books": { "create": [ {"title": "Primitive Data Types", "author": "S.Mobit"} ] } } response = self.client.put(url, data, 
format="json") self.assertEqual( response.data, { "name": "Data Structures", "code": "CS310", "books": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {"title": "Primitive Data Types", "author": "S.Mobit"} ] } ) def test_put_with_update_operation(self): url = reverse("wcourse-detail", args=[self.course2.id]) data = { "name": "Data Structures", "code": "CS310", "books": { "update": { 1: {"title": "React Programming", "author": "M.Json"} } } } response = self.client.put(url, data, format="json") self.assertEqual( response.data, { "name": "Data Structures", "code": "CS310", "books": [ {"title": "React Programming", "author": "M.Json"} ] } ) def test_put_on_deep_nested_fields(self): url = reverse("wstudent-detail", args=[self.student.id]) data = { "name": "yezy", "age": 33, "course": { "name": "Programming", "code": "CS50", "books": { "remove": [1] } } } response = self.client.put(url, data, format="json") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_on_many_2_one_relation(self): url = reverse("wstudent-detail", args=[self.student.id]) data = { "name": "yezy", "age": 33, "course": {"name": "Programming", "code": "CS50"}, "phone_numbers": { 'update': { 1: {'number': '073008811', 'type': 'office'} }, 'create': [ {'number': '076750000', 'type': 'office'} ] } } response = self.client.put(url, data, format="json") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '073008811', 'type': 'office', 'student': 1}, {'number': '073008880', 'type': 'Home', 
'student': 1}, {'number': '076750000', 'type': 'office', 'student': 1} ] } )
32.691919
99
0.400046
996
12,946
5.114458
0.111446
0.043973
0.03671
0.047114
0.851786
0.816647
0.78563
0.738516
0.719278
0.702199
0
0.038811
0.446702
12,946
396
100
32.691919
0.672344
0.007647
0
0.581267
0
0
0.217489
0
0
0
0
0
0.038567
1
0.044077
false
0
0.008264
0
0.055096
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e98574d4316ffc2546acdf0d678a3e6348fee7c4
1,319
py
Python
prob08/prob8.py
speyejack/EulersProblems
b13714c9bab15d8ac31ea66b4ddc5de944e4f8d9
[ "MIT" ]
null
null
null
prob08/prob8.py
speyejack/EulersProblems
b13714c9bab15d8ac31ea66b4ddc5de944e4f8d9
[ "MIT" ]
null
null
null
prob08/prob8.py
speyejack/EulersProblems
b13714c9bab15d8ac31ea66b4ddc5de944e4f8d9
[ "MIT" ]
null
null
null
from operator import itemgetter from functools import reduce num = """73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450""".replace("\n", "").strip() search_range = 13 product = max([reduce(lambda x,y:int(x)*int(y),sub) for sub in [num[sub:sub+search_range] for sub in range(len(num)-search_range)]]) print("Greatest product: {}".format(product))
45.482759
132
0.91812
67
1,319
18.029851
0.686567
0.027318
0.013245
0
0
0
0
0
0
0
0
0.790845
0.039424
1,319
28
133
47.107143
0.162589
0
0
0
0
0
0.789833
0.758725
0
1
0
0
0
1
0
false
0
0.08
0
0.08
0.04
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
6
e9a259e76f8beb10a60cd94e22813838141dff75
37
py
Python
setwallpaper/__init__.py
tinaxd/setwallpaper
199787c7603d4ac7bdf0c2bdbaa09720ed53f93f
[ "MIT" ]
null
null
null
setwallpaper/__init__.py
tinaxd/setwallpaper
199787c7603d4ac7bdf0c2bdbaa09720ed53f93f
[ "MIT" ]
1
2021-07-29T10:45:29.000Z
2021-07-29T10:45:29.000Z
setwallpaper/__init__.py
tinaxd/setwallpaper
199787c7603d4ac7bdf0c2bdbaa09720ed53f93f
[ "MIT" ]
null
null
null
from .wallpaper import set_wallpaper
18.5
36
0.864865
5
37
6.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.108108
37
1
37
37
0.939394
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
75d7b71b9034ef64fd2b359b3feab75f7cb6ee30
943
py
Python
Easy/Repeated String/repeatedString.py
Zealll/HackerRank
0f03ba284a699f5f37138e866b348c616d4d101a
[ "MIT" ]
null
null
null
Easy/Repeated String/repeatedString.py
Zealll/HackerRank
0f03ba284a699f5f37138e866b348c616d4d101a
[ "MIT" ]
null
null
null
Easy/Repeated String/repeatedString.py
Zealll/HackerRank
0f03ba284a699f5f37138e866b348c616d4d101a
[ "MIT" ]
null
null
null
def repeatedString(s, n): dictionary = {'a': 0} length = n // len(s) if 'a' not in s: return 0 for i in s: if i == 'a': dictionary['a'] += 1 remaining = n - len(s) * length total = int(dictionary['a'] * length) if remaining > 0: for i in range(remaining): if s[i] == 'a': total += 1 return total # def repeatedString(s, n): # dictionary = {} # length = n // len(s) # if 'a' not in s: # return 0 # for i in s: # if i == 'a': # if 'a' not in dictionary: # dictionary['a'] = 1 # else: # dictionary['a'] += 1 # remaining = n - len(s) * length # total = int(dictionary['a'] * length) # if remaining > 0: # for i in range(remaining): # if s[i] == 'a': # total += 1 # return total
15.983051
43
0.415695
115
943
3.408696
0.182609
0.168367
0.05102
0.071429
0.908163
0.760204
0.760204
0.760204
0.760204
0.760204
0
0.018868
0.437964
943
59
44
15.983051
0.720755
0.510074
0
0
0
0
0.013514
0
0
0
0
0
0
1
0.066667
false
0
0
0
0.2
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
f93ed093ff43559433edfd036a4c4cb26d6e5443
24,160
py
Python
experiments/utils/nets/cnn_factory.py
ezetl/deep-learning-techniques-thesis
8092dfc8bbd99d9a0d148139a381363b46fe09b4
[ "CC0-1.0" ]
null
null
null
experiments/utils/nets/cnn_factory.py
ezetl/deep-learning-techniques-thesis
8092dfc8bbd99d9a0d148139a381363b46fe09b4
[ "CC0-1.0" ]
null
null
null
experiments/utils/nets/cnn_factory.py
ezetl/deep-learning-techniques-thesis
8092dfc8bbd99d9a0d148139a381363b46fe09b4
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/env python2.7 import caffe from caffe import (layers as L, params as P) from layers_wrappers import * caffe.set_device(0) caffe.set_mode_gpu() class MNISTNetFactory: @staticmethod def standar(lmdb_path=None, batch_size=125, scale=1.0, is_train=True, learn_all=True): """ Creates a protoxt similar to the first layers of AlexNet architecture for the MNIST experiment :param lmdb_path: str. Path to train LMDB :param batch_size: int. Batch size :param scale: float. How to scale the images :param is_train: bool. Flag indicating if this is for testing or training :returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs """ n = caffe.NetSpec() phase = caffe.TRAIN if is_train else caffe.TEST n.data, n.label = L.Data(include=dict(phase=phase), batch_size=batch_size, backend=P.Data.LMDB, source=lmdb_path, transform_param=dict(scale=scale), ntop=2) n.conv1 = L.Convolution(n.data, kernel_size=11, stride=4, num_output=96, param=[weight_param('conv1_w', learn_all=learn_all), bias_param('conv1_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0) n.relu1 = L.ReLU(n.conv1, in_place=True) n.pool1 = L.Pooling(n.relu1, pool=P.Pooling.MAX, kernel_size=3, stride=2) n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75) n.conv2 = L.Convolution(n.norm1, kernel_size=5, num_output=256, pad=2, group=2, param=[weight_param('conv2_w', learn_all=learn_all), bias_param('conv2_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0) n.relu2 = L.ReLU(n.conv2, in_place=True) n.pool2 = L.Pooling(n.relu2, pool=P.Pooling.MAX, kernel_size=3, stride=2) n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75) n.fc500 = L.InnerProduct(n.norm2, num_output=500, param=[weight_param('fc500_w', learn_all=True), bias_param('fc500_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu3 = L.ReLU(n.fc500, in_place=True) if is_train: n.dropout = fc10input = 
L.Dropout(n.relu3, in_place=True) else: fc10input = n.relu3 # Learn all true because we always want to train the top classifier no matter if we are training from scratch or finetuning n.fc10 = L.InnerProduct(fc10input, num_output=10, param=[weight_param('fc10_w', learn_all=True), bias_param('fc10_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) if is_train: n.loss = L.SoftmaxWithLoss(n.fc10, n.label) n.acc = L.Accuracy(n.fc10, n.label, include=dict(phase=caffe.TEST)) # Returning the name of the loss/acc layers is useful because then we can # know which outputs of the net we can track to test the 'health' # of the training process return n, ('loss',), ('acc',) @staticmethod def siamese_egomotion(lmdb_path=None, labels_lmdb_path=None, batch_size=125, scale=1.0, is_train=True, learn_all=False, sfa=False): """ Creates a protoxt for the AlexNet architecture for the MNIST experiment Uses Egomotion as stated in the paper :param lmdb_path: str. Path to train LMDB :param labels_lmdb_path: str. Path to train LMDB labels :param batch_size: int. Batch size :param scale: float. How to scale the images :param is_train: bool. Flag indicating if this is for testing or training :param learn_all: bool. 
Flag indicating if we should learn all the layers from scratch :returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs """ n = caffe.NetSpec() n.data, n.label = input_layers(lmdb_path=lmdb_path, labels_lmdb_path=labels_lmdb_path, batch_size=batch_size, scale=scale, is_train=is_train) # Slice data/labels for MNIST n.data0, n.data1 = L.Slice(n.data, slice_param=dict(axis=1, slice_point=1), ntop=2) n.labelx, n.labely, n.labelz = L.Slice(n.label, slice_param=dict(axis=1, slice_point=[1,2]), ntop=3) # BCNN n.norm2, n.norm2_p = bcnn(n.data0, n.data1, n, learn_all, True) # TCNN n.concat = L.Concat(n.norm2, n.norm2_p, concat_param=dict(axis=1)) n.fc1000 = L.InnerProduct(n.concat, num_output=1000, param=[weight_param('fc1000_w', learn_all=True), bias_param('fc1000_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu3 = L.ReLU(n.fc1000, in_place=True) if is_train: n.dropout = fcxinput = fcyinput = fczinput = L.Dropout(n.relu3, in_place=True) else: fcxinput = fcyinput = fczinput = n.relu3 # Classifiers n.fcx = L.InnerProduct(fcxinput, num_output=7, param=[weight_param('fcx_w', learn_all=True), bias_param('fcx_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.fcy = L.InnerProduct(fcyinput, num_output=7, param=[weight_param('fcy_w', learn_all=True), bias_param('fcy_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.fcz = L.InnerProduct(fczinput, num_output=20, param=[weight_param('fcz_w', learn_all=True), bias_param('fcz_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.loss_x = L.SoftmaxWithLoss(n.fcx, n.labelx) n.loss_y = L.SoftmaxWithLoss(n.fcy, n.labely) n.loss_z = L.SoftmaxWithLoss(n.fcz, n.labelz) n.acc_x = L.Accuracy(n.fcx, n.labelx, include=dict(phase=caffe.TEST)) n.acc_y = L.Accuracy(n.fcy, n.labely, include=dict(phase=caffe.TEST)) n.acc_z = L.Accuracy(n.fcz, n.labelz, include=dict(phase=caffe.TEST)) 
return n, ('loss_x', 'loss_y', 'loss_z'), ('acc_x', 'acc_y', 'acc_z') @staticmethod def siamese_contrastive(lmdb_path=None, labels_lmdb_path=None, batch_size=125, scale=1.0, contrastive_margin=10, is_train=True, learn_all=False, sfa=False): """ Creates a protoxt for the AlexNet architecture for the MNIST experiment Uses Contrastive loss :param lmdb_path: str. Path to train LMDB :param labels_lmdb_path: str. Path to train LMDB labels :param batch_size: int. Batch size :param scale: float. How to scale the images :param contrastive_margin: int. Margin for the contrastive loss layer :param is_train: bool. Flag indicating if this is for testing or training :param learn_all: bool. Flag indicating if we should learn all the layers from scratch :returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs """ n = caffe.NetSpec() n.data, n.label = input_layers(lmdb_path=lmdb_path, labels_lmdb_path=labels_lmdb_path, batch_size=batch_size, scale=scale, is_train=is_train) # Slice data/labels for MNIST n.data0, n.data1 = L.Slice(n.data, slice_param=dict(axis=1, slice_point=1), ntop=2) # BCNN n.norm2, n.norm2_p = bcnn(n.data0, n.data1, n, learn_all, True) # TCNNs n.fc1 = L.InnerProduct(n.norm2, num_output=500, param=[weight_param('fc1_p_w', learn_all=True), bias_param('fc1_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu3 = L.ReLU(n.fc1, in_place=True) n.dropout1 = L.Dropout(n.relu3, in_place=True) n.fc2 = L.InnerProduct(n.relu3, num_output=100, param=[weight_param('fc2_p_w', learn_all=True), bias_param('fc2_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.fc1_p = L.InnerProduct(n.norm2_p, num_output=500, param=[weight_param('fc1_p_w', learn_all=True), bias_param('fc1_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu3_p = L.ReLU(n.fc1_p, in_place=True) n.dropout1_p = L.Dropout(n.relu3_p, in_place=True) n.fc2_p = 
L.InnerProduct(n.relu3_p, num_output=100, param=[weight_param('fc2_p_w', learn_all=True), bias_param('fc2_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.contrastive = L.ContrastiveLoss(n.fc2, n.fc2_p, n.label, contrastive_loss_param=dict(margin=contrastive_margin)) return n, ('contrastive',), None class KITTINetFactory: @staticmethod def siamese_egomotion(lmdb_path=None, labels_lmdb_path=None, mean_file=None, batch_size=125, scale=1.0, is_train=True, learn_all=True): """ Creates a protoxt for the AlexNet architecture :param lmdb_path: str. Path to train LMDB :param labels_lmdb_path: str. Path to train LMDB labels :param test_lmdb: str. Path to train LMDB :param test_labels_lmdb: str. Path to test LMDB labels :param batch_size: int. Batch size :param scale: float. How to scale the images :param is_train: bool. Flag indicating if this is for testing or training :param learn_all: bool. Flag indicating if we should learn all the layers from scratch :returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs """ n = caffe.NetSpec() n.data, n.label = input_layers(lmdb_path=lmdb_path, labels_lmdb_path=labels_lmdb_path, mean_file=mean_file, batch_size=batch_size, scale=scale, is_train=is_train) # Slice data/labels n.data0, n.data1 = L.Slice(n.data, slice_param=dict(axis=1, slice_point=3), ntop=2) n.labelx, n.labely, n.labelz = L.Slice(n.label, slice_param=dict(axis=1, slice_point=[1,2]), ntop=3) # BCNN relu5, relu5_p = bcnn(n.data0, n.data1, n, learn_all, False) # TCNN n.concat = L.Concat(relu5, relu5_p, concat_param=dict(axis=1)) n.conv6 = L.Convolution(n.concat, kernel_size=3, stride=2, num_output=256, param=[weight_param('conv6_w', learn_all=learn_all), bias_param('conv6_b', learn_all=learn_all)], weight_filler=weight_filler_fc, bias_filler=bias_filler_0) n.relu6 = L.ReLU(n.conv6, in_place=True) n.conv7 = L.Convolution(n.relu6, kernel_size=3, stride=2, num_output=128, param=[weight_param('conv7_w', 
learn_all=learn_all), bias_param('conv7_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0) n.relu7 = L.ReLU(n.conv7, in_place=True) n.fc7_ego = L.InnerProduct(n.relu7, num_output=500, param=[weight_param('fc7_ego_w', learn_all=True), bias_param('fc7_ego_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu8 = L.ReLU(n.fc7_ego, in_place=True) if is_train: n.drop = fcxinput = fcyinput = fczinput = L.Dropout(n.relu8, dropout_param=dict(dropout_ratio=0.5), in_place=True) else: fcxinput = fcyinput = fczinput = n.relu8 # Classifiers n.fcx = L.InnerProduct(fcxinput, num_output=20, param=[weight_param('fcx_w', learn_all=True), bias_param('fcx_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.fcy = L.InnerProduct(fcyinput, num_output=20, param=[weight_param('fcy_w', learn_all=True), bias_param('fcy_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.fcz = L.InnerProduct(fczinput, num_output=20, param=[weight_param('fcz_w', learn_all=True), bias_param('fcz_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) if is_train: n.loss_x = L.SoftmaxWithLoss(n.fcx, n.labelx) n.loss_y = L.SoftmaxWithLoss(n.fcy, n.labely) n.loss_z = L.SoftmaxWithLoss(n.fcz, n.labelz) n.acc_x = L.Accuracy(n.fcx, n.labelx, include=dict(phase=caffe.TEST)) n.acc_y = L.Accuracy(n.fcy, n.labely, include=dict(phase=caffe.TEST)) n.acc_z = L.Accuracy(n.fcz, n.labelz, include=dict(phase=caffe.TEST)) return n, ('loss_x', 'loss_y', 'loss_z'), ('acc_x', 'acc_y', 'acc_z') @staticmethod def siamese_contrastive(lmdb_path=None, labels_lmdb_path=None, mean_file=None, batch_size=125, scale=1.0, contrastive_margin=10, is_train=True, learn_all=True): """ Creates a protoxt for siamese AlexNet architecture with a contrastive loss layer on top :param lmdb_path: str. Path to train LMDB :param labels_lmdb_path: str. Path to train LMDB labels :param test_lmdb: str. 
Path to train LMDB :param test_labels_lmdb: str. Path to test LMDB labels :param batch_size: int. Batch size :param scale: float. How to scale the images :param contrastive_margin: int. Margin for the contrastive loss layer :param is_train: bool. Flag indicating if this is for testing or training :param learn_all: bool. Flag indicating if we should learn all the layers from scratch :returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs """ n = caffe.NetSpec() n.data, n.label = input_layers(lmdb_path=lmdb_path, labels_lmdb_path=labels_lmdb_path, mean_file=mean_file, batch_size=batch_size, scale=scale, is_train=is_train) # Slice data/labels n.data0, n.data1 = L.Slice(n.data, slice_param=dict(axis=1, slice_point=3), ntop=2) # BCNN relu5, relu5_p = bcnn(n.data0, n.data1, n, learn_all, False) # TCNNs n.fc1 = L.InnerProduct(relu5, num_output=500, param=[weight_param('fc1_p_w', learn_all=True), bias_param('fc1_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu6 = L.ReLU(n.fc1, in_place=True) n.dropout1 = L.Dropout(n.relu6, in_place=True) n.fc2 = L.InnerProduct(n.relu6, num_output=100, param=[weight_param('fc2_p_w', learn_all=True), bias_param('fc2_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.fc1_p = L.InnerProduct(relu5_p, num_output=500, param=[weight_param('fc1_p_w', learn_all=True), bias_param('fc1_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu6_p = L.ReLU(n.fc1_p, in_place=True) n.dropout1_p = L.Dropout(n.relu6_p, in_place=True) n.fc2_p = L.InnerProduct(n.relu6_p, num_output=100, param=[weight_param('fc2_p_w', learn_all=True), bias_param('fc2_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.contrastive = L.ContrastiveLoss(n.fc2, n.fc2_p, n.label, contrastive_loss_param=dict(margin=contrastive_margin)) return n, ('contrastive',), None @staticmethod def standar(lmdb_path=None, 
labels_lmdb_path=None, batch_size=126, mean_file=None, scale=1.0, is_train=True, num_classes=397, learn_all=True, layers='5', is_imagenet=False): """ Creates a protoxt for the AlexNet architecture :param lmdb_path: str. Path to train LMDB :param labels_lmdb_path: str. Path to train LMDB labels :param test_lmdb: str. Path to train LMDB :param test_labels_lmdb: str. Path to test LMDB labels :param batch_size: int. Batch size :param scale: float. How to scale the images :param is_train: bool. Flag indicating if this is for testing or training :param num_classes: int. number of classes for the top classifier :param classifier_name: str. name of the top classifier :param learn_all: bool. Flag indicating if we should learn all the layers from scratch :param layers: str. from which layer we will extract features to train a classifier :returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs """ n = caffe.NetSpec() n.data, n.label = input_layers(lmdb_path=lmdb_path, labels_lmdb_path=labels_lmdb_path, mean_file=mean_file, batch_size=batch_size, scale=scale, is_train=is_train) n.conv1 = L.Convolution(n.data, kernel_size=11, stride=4, num_output=96, param=[weight_param('conv1_w', learn_all=learn_all), bias_param('conv1_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0) n.relu1 = L.ReLU(n.conv1, in_place=True) n.pool1 = L.Pooling(n.relu1, pool=P.Pooling.MAX, kernel_size=3, stride=2) n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75) if layers == '1': n.fc_intermediate = L.InnerProduct(n.norm1, num_output=num_classes, param=[weight_param('fc_intermediate_w', learn_all=True), bias_param('fc_intermediate_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) if is_train: n.loss = L.SoftmaxWithLoss(n.fc_intermediate, n.label) n.acc = L.Accuracy(n.fc_intermediate, n.label, include=dict(phase=caffe.TEST)) return n, ('loss',), ('acc',) n.conv2 = L.Convolution(n.norm1, kernel_size=5, 
num_output=256, pad=2, group=2, param=[weight_param('conv2_w', learn_all=learn_all), bias_param('conv2_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0) n.relu2 = L.ReLU(n.conv2, in_place=True) n.pool2 = L.Pooling(n.relu2, pool=P.Pooling.MAX, kernel_size=3, stride=2) n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75) if layers == '2': n.fc_intermediate = L.InnerProduct(n.norm2, num_output=num_classes, param=[weight_param('fc_intermediate_w', learn_all=True), bias_param('fc_intermediate_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) if is_train: n.loss = L.SoftmaxWithLoss(n.fc_intermediate, n.label) n.acc = L.Accuracy(n.fc_intermediate, n.label, include=dict(phase=caffe.TEST)) return n, ('loss',), ('acc',) n.conv3 = L.Convolution(n.norm2, kernel_size=3, num_output=384, pad=1, param=[weight_param('conv3_w', learn_all=learn_all), bias_param('conv3_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0) n.relu3 = L.ReLU(n.conv3, in_place=True) if layers == '3': n.fc_prev = L.InnerProduct(n.relu3, num_output=1000, param=[weight_param('fc_prev_w', learn_all=True), bias_param('fc_prev_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu_prev = L.ReLU(n.fc_prev, in_place=True) n.fc_intermediate = L.InnerProduct(n.relu_prev, num_output=num_classes, param=[weight_param('fc_intermediate_w', learn_all=True), bias_param('fc_intermediate_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) if is_train: n.loss = L.SoftmaxWithLoss(n.fc_intermediate, n.label) n.acc = L.Accuracy(n.fc_intermediate, n.label, include=dict(phase=caffe.TEST)) return n, ('loss',), ('acc',) n.conv4 = L.Convolution(n.relu3, kernel_size=3, num_output=384, pad=1, group=2, param=[weight_param('conv4_w', learn_all=learn_all), bias_param('conv4_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0) n.relu4 = 
L.ReLU(n.conv4, in_place=True) if layers == '4': n.fc_prev = L.InnerProduct(n.relu4, num_output=1000, param=[weight_param('fc_prev_w', learn_all=True), bias_param('fc_prev_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu_prev = L.ReLU(n.fc_prev, in_place=True) n.fc_intermediate = L.InnerProduct(n.relu_prev, num_output=num_classes, param=[weight_param('fc_intermediate_w', learn_all=True), bias_param('fc_intermediate_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) if is_train: n.loss = L.SoftmaxWithLoss(n.fc_intermediate, n.label) n.acc = L.Accuracy(n.fc_intermediate, n.label, include=dict(phase=caffe.TEST)) return n, ('loss',), ('acc',) n.conv5 = L.Convolution(n.relu4, kernel_size=3, num_output=256, pad=1, group=2, param=[weight_param('conv5_w', learn_all=learn_all), bias_param('conv5_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0) n.relu5 = L.ReLU(n.conv5, in_place=True) if not is_imagenet: if layers == '5': n.fc_prev = L.InnerProduct(n.relu5, num_output=1000, param=[weight_param('fc_prev_w', learn_all=True), bias_param('fc_prev_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu_prev = L.ReLU(n.fc_prev, in_place=True) n.fc_intermediate = L.InnerProduct(n.relu_prev, num_output=num_classes, param=[weight_param('fc_intermediate_w', learn_all=True), bias_param('fc_intermediate_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) if is_train: n.loss = L.SoftmaxWithLoss(n.fc_intermediate, n.label) n.acc = L.Accuracy(n.fc_intermediate, n.label, include=dict(phase=caffe.TEST)) return n, ('loss',), ('acc',) n.fc6 = L.InnerProduct(n.relu5, num_output=4096, param=[weight_param('fc6_w', learn_all=True), bias_param('fc6_b', learn_all=learn_all)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu6 = L.ReLU(n.fc6, in_place=True) if is_train: n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True) 
else: fc7input = n.relu6 n.fc7 = L.InnerProduct(fc7input, num_output=4096, param=[weight_param('fc7_w', learn_all=True), bias_param('fc7_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu7 = L.ReLU(n.fc7, in_place=True) if is_train: n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True) else: fc8input = n.relu7 n.fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=[weight_param('fc8_w', learn_all=True), bias_param('fc8_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) if is_train: n.loss = L.SoftmaxWithLoss(n.fc8, n.label) n.acc = L.Accuracy(n.fc8, n.label, include=dict(phase=caffe.TEST)) else: if layers == '5': n.fc_imgnet = L.InnerProduct(n.relu5, num_output=num_classes, param=[weight_param('fc_w', learn_all=True), bias_param('fc_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) if is_train: n.loss = L.SoftmaxWithLoss(n.fc_imgnet, n.label) n.acc = L.Accuracy(n.fc_imgnet, n.label, include=dict(phase=caffe.TEST)) return n, ('loss',), ('acc',) n.fc6_imgnet = L.InnerProduct(n.relu5, num_output=4096, param=[weight_param('fc6_w', learn_all=True), bias_param('fc6_b', learn_all=learn_all)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu6 = L.ReLU(n.fc6_imgnet, in_place=True) if is_train: n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True) else: fc7input = n.relu6 n.fc7_imgnet = L.InnerProduct(fc7input, num_output=4096, param=[weight_param('fc7_w', learn_all=True), bias_param('fc7_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) n.relu7 = L.ReLU(n.fc7_imgnet, in_place=True) if is_train: n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True) else: fc8input = n.relu7 n.fc8_imgnet = L.InnerProduct(fc8input, num_output=num_classes, param=[weight_param('fc8_w', learn_all=True), bias_param('fc8_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1) if is_train: n.loss = 
L.SoftmaxWithLoss(n.fc8_imgnet, n.label) n.acc = L.Accuracy(n.fc8_imgnet, n.label, include=dict(phase=caffe.TEST)) return n, ('loss',), ('acc',)
61.319797
254
0.683733
3,825
24,160
4.072157
0.062745
0.064201
0.0547
0.064715
0.900616
0.888354
0.860683
0.830573
0.797509
0.793015
0
0.027645
0.195985
24,160
393
255
61.475827
0.774208
0.166556
0
0.564815
0
0
0.040963
0
0
0
0
0
0
1
0.027778
false
0
0.013889
0
0.106481
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
f95d80f6b2f10443f11498a8fbea081fb1bc38e2
38
py
Python
src/affe/tests/__init__.py
eliavw/affe
0e57d7f40cb67f9a300292e03e3f83b4b591d1e3
[ "MIT" ]
1
2020-12-02T06:16:00.000Z
2020-12-02T06:16:00.000Z
src/affe/tests/__init__.py
eliavw/affe
0e57d7f40cb67f9a300292e03e3f83b4b591d1e3
[ "MIT" ]
null
null
null
src/affe/tests/__init__.py
eliavw/affe
0e57d7f40cb67f9a300292e03e3f83b4b591d1e3
[ "MIT" ]
null
null
null
from .resources import get_dummy_flow
19
37
0.868421
6
38
5.166667
1
0
0
0
0
0
0
0
0
0
0
0
0.105263
38
1
38
38
0.911765
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f9bdd7bc3ffc492d558ea976403f626c443d40c0
1,840
py
Python
tests/test_logic/test_tree/test_functions.py
cdhiraj40/wemake-python-styleguide
7cef9be081d594c30045b7a98cae77a9be46e1aa
[ "MIT" ]
1,931
2018-03-17T13:52:45.000Z
2022-03-27T09:39:17.000Z
tests/test_logic/test_tree/test_functions.py
amansr02/wemake-python-styleguide
681035ed21fbe28ebfb32b8807b98e8de76b64aa
[ "MIT" ]
2,231
2018-03-09T21:19:05.000Z
2022-03-31T08:35:37.000Z
tests/test_logic/test_tree/test_functions.py
amansr02/wemake-python-styleguide
681035ed21fbe28ebfb32b8807b98e8de76b64aa
[ "MIT" ]
492
2018-05-18T21:20:28.000Z
2022-03-20T14:11:50.000Z
import pytest from wemake_python_styleguide.logic.tree import functions @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print("Hello world!")', 'print'), ('int("10")', 'int'), ('bool(1)', 'bool'), ('open("/tmp/file.txt", "r")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'datetime.timedelta'), ('cmath.sqrt(100)', 'cmath.sqrt'), # Functions in (made up) objects ('dt.strftime("%H:%M")', 'dt.strftime'), ('obj.funct()', 'obj.funct'), ]) def test_given_function_called_no_split( parse_ast_tree, function_call: str, function_name: str, ) -> None: """Test given_function_called without splitting the modules.""" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called(node, [function_name]) assert called_function == function_name @pytest.mark.parametrize(('function_call', 'function_name'), [ # Simple builtin functions ('print("Hello world!")', 'print'), ('int("10")', 'int'), ('bool(1)', 'bool'), ('open("/tmp/file.txt", "r")', 'open'), ('str(10)', 'str'), # Functions in modules ('datetime.timedelta(days=1)', 'timedelta'), ('cmath.sqrt(100)', 'sqrt'), # Functions in (made up) objects ('dt.strftime("%H:%M")', 'strftime'), ('obj.funct()', 'funct'), ]) def test_given_function_called_with_split( parse_ast_tree, function_call: str, function_name: str, ) -> None: """Test given_function_called splitting the modules.""" tree = parse_ast_tree(function_call) node = tree.body[0].value called_function = functions.given_function_called( node, [function_name], split_modules=True, ) assert called_function == function_name
30.666667
76
0.636957
224
1,840
5.022321
0.290179
0.085333
0.101333
0.081778
0.837333
0.780444
0.725333
0.725333
0.725333
0.725333
0
0.013201
0.17663
1,840
59
77
31.186441
0.729373
0.142391
0
0.571429
0
0
0.286812
0.060179
0
0
0
0
0.047619
1
0.047619
false
0
0.047619
0
0.095238
0.047619
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
f9c085c8c1b92b87c14d3482f5a9791aa3b801ae
106
py
Python
weld-python/weld/grizzly/core/indexes/base.py
tustvold/weld
dcbba9a45ae2a190b31badec530ea54a58437606
[ "BSD-3-Clause" ]
2,912
2017-03-16T19:32:54.000Z
2022-03-30T09:03:11.000Z
weld-python/weld/grizzly/core/indexes/base.py
QiangHeisenberg/weld
0926f84f6f4361e40842fcd6e00b7afdcc10a87f
[ "BSD-3-Clause" ]
285
2017-03-16T18:01:00.000Z
2021-08-12T10:58:23.000Z
weld-python/weld/grizzly/core/indexes/base.py
QiangHeisenberg/weld
0926f84f6f4361e40842fcd6e00b7afdcc10a87f
[ "BSD-3-Clause" ]
272
2017-03-17T06:28:58.000Z
2022-02-24T04:22:02.000Z
from abc import ABC class Index(ABC): """ Base class for an index in Grizzly. """ pass
10.6
39
0.575472
15
106
4.066667
0.733333
0
0
0
0
0
0
0
0
0
0
0
0.330189
106
9
40
11.777778
0.859155
0.330189
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
ddbbdc7b1e6e50953351ff74a0aa07962f6666af
58
py
Python
jd/api/__init__.py
fengjinqi/linjuanbang
8cdc4e81df73ccd737ac547da7f2c7dca545862a
[ "MIT" ]
5
2019-10-30T01:16:30.000Z
2020-06-14T03:32:19.000Z
jd/api/__init__.py
fengjinqi/linjuanbang
8cdc4e81df73ccd737ac547da7f2c7dca545862a
[ "MIT" ]
2
2020-10-12T07:12:48.000Z
2021-06-02T03:15:47.000Z
jd/api/__init__.py
fengjinqi/linjuanbang
8cdc4e81df73ccd737ac547da7f2c7dca545862a
[ "MIT" ]
3
2019-12-06T17:33:49.000Z
2021-03-01T13:24:22.000Z
from jd.api.rest import * from jd.api.base import FileItem
29
32
0.793103
11
58
4.181818
0.636364
0.26087
0.391304
0
0
0
0
0
0
0
0
0
0.12069
58
2
32
29
0.901961
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
fb06cfeab0c901cc01b748c3466219b1ee6e39dc
120
py
Python
musictools/custom_exceptions.py
zfazli/zfmusicc
7620b70699f26837b30dc4039cb997ec3c2b5cc3
[ "MIT" ]
49
2017-03-14T14:35:31.000Z
2017-04-07T09:15:29.000Z
musictools/custom_exceptions.py
zulfazliansyah/music
7620b70699f26837b30dc4039cb997ec3c2b5cc3
[ "MIT" ]
5
2017-09-13T02:58:01.000Z
2021-07-12T10:23:48.000Z
musictools/custom_exceptions.py
zulfazliansyah/music
7620b70699f26837b30dc4039cb997ec3c2b5cc3
[ "MIT" ]
7
2017-06-21T13:21:20.000Z
2020-09-11T21:31:36.000Z
class SongNotFound(Exception): def __init__(self, message, dErrorArg): Exception.__init__(self, message, dErrorArg)
24
46
0.783333
13
120
6.615385
0.615385
0.186047
0.348837
0.55814
0
0
0
0
0
0
0
0
0.108333
120
4
47
30
0.803738
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
6
fb12e527ea3fb00741b7c06ae9f7e2c83dd9fa0b
121
py
Python
blog/views/post_view.py
ShahadatShuvo/blood_lagbe
c5edb52bf20c084425e2a89e3bddc7e5705edf30
[ "Apache-2.0" ]
3
2021-04-24T16:30:09.000Z
2021-06-19T08:02:22.000Z
blog/views/post_view.py
ShahadatShuvo/blood_lagbe
c5edb52bf20c084425e2a89e3bddc7e5705edf30
[ "Apache-2.0" ]
16
2021-04-24T07:44:34.000Z
2021-04-28T17:12:25.000Z
blog/views/post_view.py
ShahadatShuvo/blood_lagbe
c5edb52bf20c084425e2a89e3bddc7e5705edf30
[ "Apache-2.0" ]
4
2021-04-24T23:42:51.000Z
2021-06-20T16:53:00.000Z
from django.shortcuts import render def postView(request, id): return render(request, 'blog/blog.html', context={})
24.2
56
0.735537
16
121
5.5625
0.8125
0
0
0
0
0
0
0
0
0
0
0
0.132231
121
5
56
24.2
0.847619
0
0
0
0
0
0.114754
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
fb1dcdfba92bbedd533afca86a61f62410952118
159
py
Python
backend/schema.py
ReynaldoCC/arango-backend
fe7e28b7ee266ce9b50054758018cfad976bc7c3
[ "BSD-3-Clause" ]
null
null
null
backend/schema.py
ReynaldoCC/arango-backend
fe7e28b7ee266ce9b50054758018cfad976bc7c3
[ "BSD-3-Clause" ]
null
null
null
backend/schema.py
ReynaldoCC/arango-backend
fe7e28b7ee266ce9b50054758018cfad976bc7c3
[ "BSD-3-Clause" ]
null
null
null
from abc import ABC from django.db.backends.base.schema import BaseDatabaseSchemaEditor class DatabaseSchemaEditor(BaseDatabaseSchemaEditor, ABC): pass
19.875
67
0.830189
17
159
7.764706
0.705882
0
0
0
0
0
0
0
0
0
0
0
0.119497
159
7
68
22.714286
0.942857
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
34bb50770b6d16ee92d7df01119fd24059f26213
21
py
Python
Unidad 2/packages/extra/ugly/omega.py
angelxehg/utzac-ppy
fb88bcc661518bb35c08a102a67c20d0659f71db
[ "MIT" ]
null
null
null
Unidad 2/packages/extra/ugly/omega.py
angelxehg/utzac-ppy
fb88bcc661518bb35c08a102a67c20d0659f71db
[ "MIT" ]
null
null
null
Unidad 2/packages/extra/ugly/omega.py
angelxehg/utzac-ppy
fb88bcc661518bb35c08a102a67c20d0659f71db
[ "MIT" ]
null
null
null
def funO(): pass
7
11
0.52381
3
21
3.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.333333
21
2
12
10.5
0.785714
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
6
55069127b3a4bb0fc89225d0f89824fe31ac92cf
384
py
Python
utils/__init__.py
JacobChen258/AI-Constraints-Satisfaction
9b01cfce447e40678eb2e426413b4e2e437257f0
[ "MIT" ]
null
null
null
utils/__init__.py
JacobChen258/AI-Constraints-Satisfaction
9b01cfce447e40678eb2e426413b4e2e437257f0
[ "MIT" ]
null
null
null
utils/__init__.py
JacobChen258/AI-Constraints-Satisfaction
9b01cfce447e40678eb2e426413b4e2e437257f0
[ "MIT" ]
null
null
null
from .directions import Direction from .directions import direction_to_vector from .directions import vector_to_direction from .constants import ASSETS from .constants import LINE_LIMIT from .constants import TILESIZE from .constants import TETROMINO_GRID_SIZE from .constants import BORDER from .utils import load_grid from .matrix_util import MatrixUtil from .gframe import GFrame
32
43
0.854167
53
384
6.018868
0.396226
0.203762
0.297806
0.181818
0
0
0
0
0
0
0
0
0.117188
384
11
44
34.909091
0.941003
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9b64b2a7431b2238a96db3dce1afd55264830e4e
36
py
Python
src/melissa/__init__.py
aleksandrgordienko/melissa-quiz
49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f
[ "MIT" ]
null
null
null
src/melissa/__init__.py
aleksandrgordienko/melissa-quiz
49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f
[ "MIT" ]
null
null
null
src/melissa/__init__.py
aleksandrgordienko/melissa-quiz
49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f
[ "MIT" ]
null
null
null
from melissa.melissa import Melissa
18
35
0.861111
5
36
6.2
0.6
0
0
0
0
0
0
0
0
0
0
0
0.111111
36
1
36
36
0.96875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9b7c8b16f664fc8e6e0bdb34685ccbe413103c9b
82
py
Python
plugins/supervisor/__init__.py
ajenti/ajen
177c1a67278a7763ed06eb2f773d7b409a85ec77
[ "MIT" ]
3,777
2015-02-21T00:10:12.000Z
2022-03-30T15:33:22.000Z
plugins/supervisor/__init__.py
ajenti/ajen
177c1a67278a7763ed06eb2f773d7b409a85ec77
[ "MIT" ]
749
2015-03-12T14:17:03.000Z
2022-03-25T13:22:28.000Z
plugins/supervisor/__init__.py
ajenti/ajen
177c1a67278a7763ed06eb2f773d7b409a85ec77
[ "MIT" ]
687
2015-03-21T10:42:33.000Z
2022-03-21T23:18:12.000Z
# pyflakes: disable-all from .api import * from .aug import * from .main import *
16.4
23
0.707317
12
82
4.833333
0.666667
0.344828
0
0
0
0
0
0
0
0
0
0
0.182927
82
4
24
20.5
0.865672
0.256098
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9b8c163ee58a3767776a54eaed4e118faa56927f
7,793
py
Python
foundation/organisation/migrations/0001_initial.py
Mindelirium/foundation
2d07e430915d696ca7376afea633692119c4d30e
[ "MIT" ]
null
null
null
foundation/organisation/migrations/0001_initial.py
Mindelirium/foundation
2d07e430915d696ca7376afea633692119c4d30e
[ "MIT" ]
null
null
null
foundation/organisation/migrations/0001_initial.py
Mindelirium/foundation
2d07e430915d696ca7376afea633692119c4d30e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Person' db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation', ['Person']) # Adding model 'Unit' db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit']) # Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', 
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership']) # Adding model 'Board' db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board']) # Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self, orm): # Deleting model 'Person' db.delete_table(u'organisation_person') # Deleting model 'Unit' db.delete_table(u'organisation_unit') # Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting model 'Board' db.delete_table(u'organisation_board') # Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models = { u'organisation.board': { 'Meta': {'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.boardmembership': { 'Meta': {'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Board']"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.person': { 'Meta': {'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, u'organisation.unit': { 'Meta': {'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': 
('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.unitmembership': { 'Meta': {'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Unit']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) } } complete_apps = ['organisation']
59.946154
115
0.603747
889
7,793
5.163105
0.086614
0.102832
0.176906
0.252723
0.820915
0.755991
0.712636
0.712636
0.694771
0.693464
0
0.00825
0.191197
7,793
130
116
59.946154
0.719975
0.035801
0
0.466019
0
0
0.485006
0.312675
0
0
0
0
0
1
0.019417
false
0
0.038835
0
0.087379
0
0
0
0
null
0
0
1
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
fd5aaecf58a2361f54fe46788333b514263a9988
39
py
Python
torchnet/containers/models/__init__.py
a5chin/torchnet
6895735a3def7be03b04ae330d06eaf7e6258f10
[ "MIT" ]
null
null
null
torchnet/containers/models/__init__.py
a5chin/torchnet
6895735a3def7be03b04ae330d06eaf7e6258f10
[ "MIT" ]
null
null
null
torchnet/containers/models/__init__.py
a5chin/torchnet
6895735a3def7be03b04ae330d06eaf7e6258f10
[ "MIT" ]
null
null
null
from .classification import Classifier
19.5
38
0.871795
4
39
8.5
1
0
0
0
0
0
0
0
0
0
0
0
0.102564
39
1
39
39
0.971429
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b5cac31572b2496bccbfdfd8efd0fdf9ff3a7b3a
8,700
py
Python
backend/lib/google/cloud/grpc/datastore/v1/datastore_pb2_grpc.py
isaiah-solo/Droptalk
578a647adceecfae9d30ca6b98fdaae7077d683f
[ "MIT" ]
null
null
null
backend/lib/google/cloud/grpc/datastore/v1/datastore_pb2_grpc.py
isaiah-solo/Droptalk
578a647adceecfae9d30ca6b98fdaae7077d683f
[ "MIT" ]
null
null
null
backend/lib/google/cloud/grpc/datastore/v1/datastore_pb2_grpc.py
isaiah-solo/Droptalk
578a647adceecfae9d30ca6b98fdaae7077d683f
[ "MIT" ]
null
null
null
import grpc from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): """Each RPC normalizes the partition IDs of the keys in its input entities, and always returns entities with keys with normalized partition IDs. This applies to all keys and entities, including those in values, except keys with both an empty path and an empty or unset partition ID. Normalization of input keys sets the project ID (if not already set) to the project ID from the request. 
""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, 
response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object): """Each RPC normalizes the partition IDs of the keys in its input entities, and always returns entities with keys with normalized partition IDs. This applies to all keys and entities, including those in values, except keys with both an empty path and an empty or unset partition ID. Normalization of input keys sets the project ID (if not already set) to the project ID from the request. """ def Lookup(self, request, context): """Looks up entities by key. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RunQuery(self, request, context): """Queries for entities. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def BeginTransaction(self, request, context): """Begins a new transaction. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Commit(self, request, context): """Commits a transaction, optionally creating, deleting or modifying some entities. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Rollback(self, request, context): """Rolls back a transaction. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def AllocateIds(self, request, context): """Allocates IDs for the given keys, which is useful for referencing an entity before it is inserted. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers = { 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString, 
response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'google.datastore.v1.Datastore', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))
55.063291
139
0.811034
1,124
8,700
5.80605
0.118327
0.132394
0.07723
0.093779
0.824088
0.824088
0.824088
0.749617
0.749617
0.749617
0
0.013567
0.127356
8,700
157
140
55.414013
0.846022
0.128276
0
0.333333
0
0
0.080358
0.036101
0
0
0
0
0
1
0.074074
false
0
0.138889
0
0.231481
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
bd3b5192e37e67b890d6f238c4fa601930e90daa
27,353
py
Python
post_optimization_studies/mad_analyses/ma100MeV_L2TeV_deta2_1/Output/Histos/MadAnalysis5job_0/selection_3.py
sheride/axion_pheno
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
[ "MIT" ]
null
null
null
post_optimization_studies/mad_analyses/ma100MeV_L2TeV_deta2_1/Output/Histos/MadAnalysis5job_0/selection_3.py
sheride/axion_pheno
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
[ "MIT" ]
null
null
null
post_optimization_studies/mad_analyses/ma100MeV_L2TeV_deta2_1/Output/Histos/MadAnalysis5job_0/selection_3.py
sheride/axion_pheno
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
[ "MIT" ]
null
null
null
def selection_3(): # Library import import numpy import matplotlib import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec # Library version matplotlib_version = matplotlib.__version__ numpy_version = numpy.__version__ # Histo binning xBinning = numpy.linspace(0.0,1000.0,101,endpoint=True) # Creating data sequence: middle of each bin xData = numpy.array([5.0,15.0,25.0,35.0,45.0,55.0,65.0,75.0,85.0,95.0,105.0,115.0,125.0,135.0,145.0,155.0,165.0,175.0,185.0,195.0,205.0,215.0,225.0,235.0,245.0,255.0,265.0,275.0,285.0,295.0,305.0,315.0,325.0,335.0,345.0,355.0,365.0,375.0,385.0,395.0,405.0,415.0,425.0,435.0,445.0,455.0,465.0,475.0,485.0,495.0,505.0,515.0,525.0,535.0,545.0,555.0,565.0,575.0,585.0,595.0,605.0,615.0,625.0,635.0,645.0,655.0,665.0,675.0,685.0,695.0,705.0,715.0,725.0,735.0,745.0,755.0,765.0,775.0,785.0,795.0,805.0,815.0,825.0,835.0,845.0,855.0,865.0,875.0,885.0,895.0,905.0,915.0,925.0,935.0,945.0,955.0,965.0,975.0,985.0,995.0]) # Creating weights for histo: y4_PT_0 y4_PT_0_weights = 
numpy.array([0.0,0.0,3.90559020362,3.4098395493,3.184383199,3.05922062016,2.90005937365,2.69067250471,2.59447024398,2.33587893166,2.18523204026,2.12003125671,1.8892599792,1.66270571154,1.62632415953,1.44260945213,1.39024714742,1.25777804247,1.16363732442,1.0803099133,0.944656967911,0.902964485484,0.808927284215,0.7608055704,0.720230589587,0.684924972608,0.648649335451,0.502277007629,0.526816080088,0.464822718018,0.461619693006,0.412484793714,0.39755051973,0.337661545893,0.325900960506,0.28000465763,0.255365785403,0.250059771033,0.22869290836,0.223345647149,0.201964196204,0.200894176418,0.179527673455,0.150678106305,0.15068326216,0.137849059879,0.12715281884,0.107934748702,0.112197401854,0.116476082122,0.0822722593839,0.0780036110513,0.0790783470457,0.0812097935254,0.0737402784453,0.0630358040256,0.0448726458182,0.061973338167,0.0545157734797,0.0576995339793,0.039533074426,0.0363344179017,0.0406034379355,0.0459482611057,0.0416654641475,0.0384728147938,0.0203040730752,0.0320609174698,0.0256479210294,0.0224402757314,0.0245828332786,0.0256441920272,0.0160344015652,0.0213785812527,0.0213699282091,0.0128207331093,0.0149610924238,0.0117553096284,0.0128248937644,0.0160306765598,0.0138933747873,0.00962064173846,0.00641112994094,0.0160276110243,0.00641375982671,0.00855215671885,0.00427087054611,0.00748071407694,0.00641112994094,0.00427569466788,0.00106771763653,0.00534417968749,0.00320688031303,0.00213916307619,0.00427383216519,0.00855292410193,0.00534154980172,0.00106958133825,0.00106771763653,0.00213543527306]) # Creating weights for histo: y4_PT_1 y4_PT_1_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) 
# Creating weights for histo: y4_PT_2 y4_PT_2_weights = numpy.array([0.0,0.0,1.05462838872,0.0,1.0521138287,0.0,1.0529581672,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_3 y4_PT_3_weights = numpy.array([0.0,0.0,1.15196926193,4.14546686726,2.07128556314,2.30314234879,2.99532358213,1.38295763269,1.15081073498,1.84150989919,1.84187686212,1.61263992282,2.53659494287,0.461124849562,1.15077768911,0.691380062969,0.460707165054,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_4 y4_PT_4_weights = numpy.array([0.0,0.0,2.35403599972,1.41266419099,2.07671152374,1.99354845762,1.85609859839,1.74406233686,1.49540870465,1.74443741835,1.8551733974,1.57821630893,1.8827986294,1.79972135115,1.32958075756,1.57824439195,1.46788080148,1.41290885953,1.35653545928,1.27375786152,0.969080905097,0.636699853394,1.02415209938,0.747512002845,0.221556745765,0.36000362928,0.360011284789,0.166236651256,0.0828748881291,0.0277222376876,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_5 y4_PT_5_weights = 
numpy.array([0.0,0.0,0.816624299929,0.847103616213,0.695752625962,0.55432358588,0.665464465348,0.554366550392,0.594913641312,0.796518364615,0.675453714448,0.665563987664,0.766337615281,0.685355578101,0.654902962926,0.625159140836,0.675573262597,0.614919872257,0.645341538418,0.544521487282,0.625120909702,0.504131447432,0.544501764871,0.524144657076,0.705610190047,0.554607042429,0.443654163045,0.433346685268,0.494214716117,0.292538445978,0.413215748006,0.342808624486,0.221770189954,0.161297092786,0.130998008991,0.191592110731,0.0705828653668,0.0503668486607,0.0403519907686,0.0100846024034,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_6 y4_PT_6_weights = numpy.array([0.0,0.0,0.461223304328,0.401713933697,0.345168970089,0.339518636703,0.282930004188,0.257400083665,0.297117705146,0.274452849692,0.265985660171,0.274443577263,0.248954016813,0.200886592451,0.248980141208,0.217799810348,0.226396352175,0.203669167531,0.220590503613,0.240494291405,0.263167726742,0.22908173989,0.246120962784,0.229171463143,0.220680034492,0.294228593322,0.271553580518,0.280044047298,0.260194932209,0.288645244576,0.282900878759,0.229198318559,0.257496540009,0.277205414422,0.288555521324,0.243280329218,0.240484018631,0.183940978764,0.260225519682,0.229204589952,0.198067273928,0.20088170615,0.135804415171,0.124498169551,0.115999045939,0.0961765169427,0.0481013976183,0.0735709126931,0.0651046850421,0.0480742344037,0.0594057964477,0.0452637650872,0.0396098535436,0.0367849407841,0.0198036724931,0.0197992940599,0.0141489683684,0.0028271100558,0.00566047149844,0.00565071043959,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_7 
y4_PT_7_weights = numpy.array([0.0,0.0,0.0578886571164,0.0654949359643,0.047269924506,0.0381081870264,0.0274217245724,0.0380153952501,0.0290052464243,0.0380934759909,0.0259196392317,0.0228540839485,0.0274196567562,0.00456656063309,0.0106596009304,0.0137082272975,0.0151919858784,0.0151841163607,0.016728456049,0.0121775233802,0.0228492866148,0.0152297028464,0.0121575541835,0.0197920262287,0.0137063130905,0.00912570430477,0.0136821137326,0.0136781789737,0.0076256276998,0.0228986896985,0.0182696999211,0.0228629223858,0.0152104189831,0.0212864074769,0.0304524223821,0.0274217363885,0.0228417006833,0.0197749756069,0.0213510060559,0.0303954097346,0.0320008504385,0.02895551249,0.0304396373697,0.0303826365384,0.0472342871703,0.0365619803954,0.050328827329,0.0425762888852,0.0442239248562,0.039660845244,0.0335054053426,0.0380489293213,0.0395747531924,0.0411030111785,0.0548946364586,0.057941616844,0.0320023747145,0.0380960637153,0.054860287077,0.0441785156118,0.0274625491728,0.031996324875,0.0198249104148,0.0274270418142,0.0106799529685,0.0152205571907,0.0167747869486,0.0197493465017,0.010646577233,0.010684560063,0.00610276966803,0.00457786981545,0.0,0.00153528737971,0.00610901329144,0.00152607673542,0.00302457943684,0.003052912064,0.00151690862907,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_8 y4_PT_8_weights = 
numpy.array([0.0,0.0,0.00830935147317,0.00686212867026,0.0057777918495,0.00487499237596,0.00343193664794,0.0039737719234,0.00252637273229,0.00234767030544,0.00162461810849,0.00252753697142,0.00216603821697,0.00162427804128,0.00126508311697,0.00162446251715,0.00126490672877,0.00108334627393,0.00162441360601,0.00216564115096,0.00198639299905,0.001985104749,0.000901942565811,0.000722357812845,0.00108287449327,0.00126317211644,0.0018061512193,0.000721643787256,0.00252674938656,0.00162505330208,0.00162595103328,0.00108327849157,0.00126367932879,0.00108453400582,0.00144331453341,0.00216528644893,0.00198653357043,0.00234658540248,0.00198651855048,0.000722846924224,0.00253033029806,0.00234643943932,0.00325011469182,0.00162371036398,0.00343175448284,0.00360926417115,0.0016237034317,0.00324671517517,0.00433281048613,0.00379432543106,0.00451221808065,0.00325053216957,0.00216682657208,0.00361096912869,0.00632018170794,0.00415376105979,0.00415287141626,0.00270884940443,0.00740285092848,0.00559668969588,0.00613692284273,0.00758210832344,0.00866876360916,0.00938883569284,0.0079450212645,0.00866896002397,0.00938716039008,0.0131790021367,0.00848917885619,0.00957163240558,0.0121009891024,0.0106549216807,0.0106480856752,0.00974956881468,0.00957010730238,0.00902793311509,0.0113765304214,0.0108339942147,0.0106544595282,0.00975270374903,0.00993226539437,0.0093889589335,0.00848701444206,0.00559722117123,0.00740305119456,0.0061388715857,0.00794359629434,0.00325065078871,0.00541598422314,0.00343001717462,0.00523492058222,0.0066811227984,0.00379199310152,0.00397064854286,0.00288920710742,0.00307069746913,0.00306901523409,0.00234680723568,0.00289091437573,0.00271114129562]) # Creating weights for histo: y4_PT_9 y4_PT_9_weights = 
numpy.array([0.0,0.0,0.012170493784,0.0242554668822,0.0121753353338,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_10 y4_PT_10_weights = numpy.array([0.0,0.0,0.120516882096,0.190667418918,0.170688939005,0.230969775691,0.220865995678,0.150608273824,0.0702865634698,0.0100459438961,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_11 y4_PT_11_weights = numpy.array([0.0,0.0,0.198084037561,0.31351194449,0.445496209225,0.467619888339,0.583054986195,0.577571193396,0.693118827227,0.555574269604,0.544584339317,0.494883658319,0.50052345767,0.373980165186,0.324499762188,0.247559646607,0.176019389051,0.115393049215,0.0439894192711,0.0274758738871,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_12 y4_PT_12_weights = 
numpy.array([0.0,0.0,0.107569510644,0.159834387706,0.237835485183,0.296068725784,0.341467706071,0.373026348684,0.375033292643,0.39866176095,0.369072103919,0.34638109128,0.41743953349,0.372032175906,0.369074949795,0.368120939945,0.350345637316,0.399686516877,0.307892219056,0.316796364428,0.279250118549,0.20230063245,0.190451887037,0.180576456145,0.117451715524,0.0858512665881,0.0631590514663,0.0434230603867,0.0148022518436,0.00986973513175,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_13 y4_PT_13_weights = numpy.array([0.0,0.0,0.0287386225353,0.0471453137393,0.064036059712,0.0892481061597,0.0995732117025,0.105116064126,0.112683447048,0.128803190991,0.120988463256,0.141919667971,0.140142882391,0.131331293443,0.140156966081,0.149220140954,0.143433424629,0.143181558631,0.137888531671,0.148983358908,0.141911065717,0.13007164337,0.126282850572,0.120244948429,0.123264339616,0.120001604664,0.104859476891,0.104607890967,0.102586681338,0.10411896285,0.0909966843491,0.0791581022219,0.0579749114769,0.0524290982777,0.0395763103928,0.0342805427144,0.0289909606569,0.0138615482158,0.00882280788765,0.00377996328405,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_14 y4_PT_14_weights = 
numpy.array([0.0,0.0,0.0151722744734,0.0257760572781,0.0277619033204,0.0352127451226,0.0395232531673,0.0400689339222,0.0503777511844,0.0518301437998,0.0460993661199,0.0529711944513,0.0472398568938,0.0466623430952,0.0466740105456,0.0497999774426,0.0483796878119,0.0552642833532,0.0577990894338,0.0569929755901,0.0509945763924,0.0558091842786,0.0501156484605,0.0460905380491,0.056957813274,0.0544144290679,0.0461057547239,0.0532696492313,0.0535481183788,0.0489525326307,0.0538078616183,0.0532481939199,0.0538143302047,0.0501142287707,0.0609924216098,0.0526891660816,0.0506668380116,0.0483805876152,0.0509633732111,0.0418048545796,0.04465198241,0.0426608875147,0.0338083720137,0.0289025640571,0.0291987393353,0.0220538906659,0.0231870930324,0.0186147222113,0.0157373709855,0.0123215574274,0.0134671070957,0.00801535744122,0.010879552542,0.00544157087883,0.00629035739746,0.0048590431759,0.00315123337563,0.00228740414421,0.000570437544937,0.000573476980743,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_15 y4_PT_15_weights = 
numpy.array([0.0,0.0,0.00203023686697,0.00330505940149,0.00457813469553,0.00444700574288,0.00433982433676,0.00500579160733,0.00468589865485,0.00490187501373,0.00490069733424,0.00472927162631,0.00429701296335,0.00466560568666,0.00451344195349,0.00410183752552,0.00518109820923,0.00488013194909,0.00453648423748,0.00446931878763,0.00434253174226,0.00468651054526,0.00455639163089,0.00449244908325,0.00427686249019,0.00412531609576,0.00433829041971,0.00485652806705,0.0049028431417,0.00457613557413,0.00472786344016,0.00451228103812,0.00516050348687,0.00550521572015,0.0055270174592,0.00546343534016,0.00582993254838,0.00587162910776,0.00602748932957,0.00664594708189,0.00675932702167,0.006825587209,0.00691048909863,0.00662695333306,0.00715041300402,0.00695376567639,0.00762135906668,0.00652188420615,0.00807411605831,0.00770777810924,0.00788062876744,0.00917801664672,0.00773262253621,0.00818296968548,0.0082691205021,0.00794606332182,0.0075794403827,0.00755999819364,0.00826910792901,0.00766849139222,0.00684624898785,0.00654375300169,0.00563605549171,0.00472775866441,0.00440546006059,0.00449218085732,0.00380075433564,0.00339095330796,0.00246257935053,0.00235287871616,0.00185822064288,0.00149050300667,0.00146861535149,0.00125084229797,0.000775942417429,0.000799464574381,0.000775939064605,0.000194590956758,0.00021602262785,6.48123121329e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y4_PT_16 y4_PT_16_weights = 
numpy.array([0.0,0.0,0.000508893869518,0.000879128628745,0.000992943272196,0.000737860541265,0.000507704851964,0.000738067423785,0.000709235159,0.000822779949229,0.000766849490227,0.000650196412665,0.000454346241198,0.000650855377302,0.000651784640709,0.000593312779211,0.000623069703412,0.000511161557381,0.000623539607427,0.000793008173447,0.000510494127343,0.000597687460833,0.000678863527872,0.000538564209004,0.000568217469172,0.000453062025012,0.000539805949669,0.000878340752387,0.000396081410735,0.0005928335187,0.000624078571292,0.000738070542617,0.000593818029988,0.00048155730743,0.000510886951653,0.000624431741882,0.000764373137654,0.000565531857827,0.000709441447457,0.000710160560996,0.00102190697347,0.000595592348338,0.00042465332731,0.000738077968407,0.000678803230454,0.000992078316133,0.000650647900719,0.000851640283501,0.00118927686262,0.000936860287459,0.000707942180381,0.0011320459992,0.00079299955953,0.000766869985408,0.00130461156535,0.00107612192638,0.00116265109718,0.00101848323856,0.00110836708442,0.00110386571881,0.00150483047781,0.00127272581557,0.00107824273211,0.00153224204038,0.00180962798417,0.00164447543603,0.0018163646612,0.0017347775024,0.00133396452656,0.00153737029121,0.00164394969007,0.00161533217915,0.00175967469236,0.00141904745043,0.00161779308608,0.00190046916293,0.00190158303148,0.00192184207277,0.00193016044314,0.00175371326785,0.00176071875848,0.00153214847542,0.00124928541717,0.00135856230864,0.00133031044363,0.00107794035393,0.00110429626614,0.000964978636758,0.000850867555755,0.000566123693319,0.000705250925435,0.000820178546347,0.000564525960263,0.000595245563928,0.000677627430807,0.000654230399025,0.000511285122533,0.000339645106893,0.000508386093972,0.000312252108798]) # Creating a new Canvas fig = plt.figure(figsize=(12,6),dpi=80) frame = gridspec.GridSpec(1,1,right=0.7) pad = fig.add_subplot(frame[0]) # Creating a new Stack pad.hist(x=xData, bins=xBinning, 
weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights+y4_PT_9_weights+y4_PT_10_weights+y4_PT_11_weights+y4_PT_12_weights+y4_PT_13_weights+y4_PT_14_weights+y4_PT_15_weights+y4_PT_16_weights,\ label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#e5e5e5", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights+y4_PT_9_weights+y4_PT_10_weights+y4_PT_11_weights+y4_PT_12_weights+y4_PT_13_weights+y4_PT_14_weights+y4_PT_15_weights,\ label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#f2f2f2", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights+y4_PT_9_weights+y4_PT_10_weights+y4_PT_11_weights+y4_PT_12_weights+y4_PT_13_weights+y4_PT_14_weights,\ label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights+y4_PT_9_weights+y4_PT_10_weights+y4_PT_11_weights+y4_PT_12_weights+y4_PT_13_weights,\ label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") 
pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights+y4_PT_9_weights+y4_PT_10_weights+y4_PT_11_weights+y4_PT_12_weights,\ label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights+y4_PT_9_weights+y4_PT_10_weights+y4_PT_11_weights,\ label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights+y4_PT_9_weights+y4_PT_10_weights,\ label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights+y4_PT_9_weights,\ label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights,\ label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\ color=None, 
edgecolor="#ad998c", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights,\ label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#9b8e82", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights,\ label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights,\ label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights,\ label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights,\ label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights,\ label="$bg\_dip\_100\_200$", 
histtype="step", rwidth=1.0,\ color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights+y4_PT_1_weights,\ label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y4_PT_0_weights,\ label="$signal$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") # Axis plt.rc('text',usetex=False) plt.xlabel(r"p_{T} [ j_{2} ] ( GeV ) ",\ fontsize=16,color="black") plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\ fontsize=16,color="black") # Boundary of y-axis ymax=(y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights+y4_PT_9_weights+y4_PT_10_weights+y4_PT_11_weights+y4_PT_12_weights+y4_PT_13_weights+y4_PT_14_weights+y4_PT_15_weights+y4_PT_16_weights).max()*1.1 ymin=0 # linear scale #ymin=min([x for x in (y4_PT_0_weights+y4_PT_1_weights+y4_PT_2_weights+y4_PT_3_weights+y4_PT_4_weights+y4_PT_5_weights+y4_PT_6_weights+y4_PT_7_weights+y4_PT_8_weights+y4_PT_9_weights+y4_PT_10_weights+y4_PT_11_weights+y4_PT_12_weights+y4_PT_13_weights+y4_PT_14_weights+y4_PT_15_weights+y4_PT_16_weights) if x])/100. # log scale plt.gca().set_ylim(ymin,ymax) # Log/Linear scale for X-axis plt.gca().set_xscale("linear") #plt.gca().set_xscale("log",nonposx="clip") # Log/Linear scale for Y-axis plt.gca().set_yscale("linear") #plt.gca().set_yscale("log",nonposy="clip") # Legend plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.) 
# Saving the image plt.savefig('../../HTML/MadAnalysis5job_0/selection_3.png') plt.savefig('../../PDF/MadAnalysis5job_0/selection_3.png') plt.savefig('../../DVI/MadAnalysis5job_0/selection_3.eps') # Running! if __name__ == '__main__': selection_3()
140.994845
1,762
0.754177
5,390
27,353
3.691651
0.187384
0.190471
0.280581
0.367876
0.392803
0.392803
0.390089
0.382652
0.364408
0.364408
0
0.523893
0.060505
27,353
193
1,763
141.725389
0.250409
0.046905
0
0.185841
0
0.00885
0.039909
0.007682
0
0
0
0
0
1
0.00885
false
0
0.035398
0
0.044248
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1fd084450ab7989741cd1d8f31d5b1b6c83f3cdb
43
py
Python
altair/vegalite/v2/schema/__init__.py
hydrosquall/altair
ded897b0967a88a467828b1e2c133bd92862de23
[ "BSD-3-Clause" ]
null
null
null
altair/vegalite/v2/schema/__init__.py
hydrosquall/altair
ded897b0967a88a467828b1e2c133bd92862de23
[ "BSD-3-Clause" ]
null
null
null
altair/vegalite/v2/schema/__init__.py
hydrosquall/altair
ded897b0967a88a467828b1e2c133bd92862de23
[ "BSD-3-Clause" ]
null
null
null
from .core import * from .channels import *
21.5
23
0.744186
6
43
5.333333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.162791
43
2
23
21.5
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1fefd29342dc01347b09406a2334475730114bff
133
py
Python
scripts/npc/autogen_10308.py
hsienjan/SideQuest-Server
3e88debaf45615b759d999255908f99a15283695
[ "MIT" ]
null
null
null
scripts/npc/autogen_10308.py
hsienjan/SideQuest-Server
3e88debaf45615b759d999255908f99a15283695
[ "MIT" ]
null
null
null
scripts/npc/autogen_10308.py
hsienjan/SideQuest-Server
3e88debaf45615b759d999255908f99a15283695
[ "MIT" ]
null
null
null
# ObjectID: 1000002 # ParentID: 10308 # Character field ID when accessed: 4000032 # Object Position X: 2522 # Object Position Y: -22
22.166667
43
0.744361
18
133
5.5
0.888889
0.282828
0
0
0
0
0
0
0
0
0
0.227273
0.172932
133
5
44
26.6
0.672727
0.917293
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
1
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
1ff0515c6eb3e9318b37eb944d57d34d627b9e8c
3,841
py
Python
counter_attack/cli/commands/approximation_dataset.py
samuelemarro/anti-attacks
f63829ee26e24d40aecdd2d6cc6bd7026d11a016
[ "MIT" ]
null
null
null
counter_attack/cli/commands/approximation_dataset.py
samuelemarro/anti-attacks
f63829ee26e24d40aecdd2d6cc6bd7026d11a016
[ "MIT" ]
null
null
null
counter_attack/cli/commands/approximation_dataset.py
samuelemarro/anti-attacks
f63829ee26e24d40aecdd2d6cc6bd7026d11a016
[ "MIT" ]
null
null
null
import pathlib import click import torch from counter_attack import defenses, rejectors, training, utils from counter_attack.cli import definitions, options, parsing @click.group(name='approximation-dataset') def approximation_dataset(): pass @approximation_dataset.command(name='preprocessor') @options.global_options @options.dataset_options('train', 'train') @options.standard_model_options @options.pretrained_model_options @options.preprocessor_options @options.adversarial_dataset_options @options.approximation_dataset_options('preprocessor') def approximation_dataset_preprocessor(options): """ Generates the dataset to train a substitute model for models with preprocessors. Saves the labels predicted by the defended model, using the genuine dataset + an adversarial dataset. """ adversarial_loader = options['adversarial_loader'] approximation_dataset_path = options['approximation_dataset_path'] foolbox_model = options['foolbox_model'] genuine_loader = options['loader'] preprocessor = options['preprocessor'] defended_model = defenses.PreprocessorDefenseModel( foolbox_model, preprocessor) genuine_approximation_dataset = training.generate_approximation_dataset(defended_model, genuine_loader, 'Genuine Approximation Dataset') adversarial_approximation_dataset = training.generate_approximation_dataset(defended_model, adversarial_loader, 'Adversarial Approximation Dataset') approximation_dataset = genuine_approximation_dataset + adversarial_approximation_dataset utils.save_zip(approximation_dataset, approximation_dataset_path) @approximation_dataset.command(name='model') @options.global_options @options.dataset_options('train', 'train') @options.standard_model_options @options.custom_model_options @options.adversarial_dataset_options @options.approximation_dataset_options('model') def approximation_dataset_model(options): adversarial_loader = options['adversarial_loader'] approximation_dataset_path = options['approximation_dataset_path'] custom_foolbox_model = 
options['custom_foolbox_model'] genuine_loader = options['loader'] genuine_approximation_dataset = training.generate_approximation_dataset(custom_foolbox_model, genuine_loader, 'Genuine Approximation Dataset') adversarial_approximation_dataset = training.generate_approximation_dataset(custom_foolbox_model, adversarial_loader, 'Adversarial Approximation Dataset') approximation_dataset = genuine_approximation_dataset + adversarial_approximation_dataset utils.save_zip(approximation_dataset, approximation_dataset_path) @approximation_dataset.command(name='rejector') @options.global_options @options.dataset_options('train', 'train') @options.standard_model_options @options.pretrained_model_options @options.distance_options @options.counter_attack_options(False) @options.detector_options @options.rejector_options @options.adversarial_dataset_options @options.approximation_dataset_options('rejector') def approximation_dataset_rejector(options): adversarial_loader = options['adversarial_loader'] approximation_dataset_path = options['approximation_dataset_path'] foolbox_model = options['foolbox_model'] genuine_loader = options['loader'] rejector = options['rejector'] defended_model = rejectors.RejectorModel(foolbox_model, rejector) genuine_approximation_dataset = training.generate_approximation_dataset(defended_model, genuine_loader, 'Genuine Approximation Dataset') adversarial_approximation_dataset = training.generate_approximation_dataset(defended_model, adversarial_loader, 'Adversarial Approximation Dataset') approximation_dataset = genuine_approximation_dataset + adversarial_approximation_dataset utils.save_zip(approximation_dataset, approximation_dataset_path)
41.75
158
0.829732
399
3,841
7.621554
0.14787
0.328839
0.071029
0.071029
0.722789
0.720816
0.70832
0.706018
0.706018
0.607037
0
0
0.096329
3,841
91
159
42.208791
0.876116
0.047644
0
0.530303
0
0
0.138606
0.02728
0
0
0
0
0
1
0.060606
false
0.015152
0.075758
0
0.136364
0
0
0
0
null
1
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
951352401456a0ad5a4f14435c8689c5d005440b
968
py
Python
DQM/CSCMonitorModule/python/csc_dqm_masked_hw_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
DQM/CSCMonitorModule/python/csc_dqm_masked_hw_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
DQM/CSCMonitorModule/python/csc_dqm_masked_hw_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms #-------------------------- # Masked HW Elements #-------------------------- CSCMaskedHW = cms.untracked.vstring( # == Post LS1 - All ME4/2 chambers should be enabled # == mask most or ME+4/2 chambers, except 9,10,11,12,13 #'1,4,2,1,*,*,*', #'1,4,2,2,*,*,*', #'1,4,2,3,*,*,*', #'1,4,2,4,*,*,*', #'1,4,2,5,*,*,*', #'1,4,2,6,*,*,*', #'1,4,2,7,*,*,*', #'1,4,2,8,*,*,*', #'1,4,2,14,*,*,*', #'1,4,2,15,*,*,*', #'1,4,2,16,*,*,*', #'1,4,2,17,*,*,*', #'1,4,2,18,*,*,*', #'1,4,2,19,*,*,*', #'1,4,2,20,*,*,*', #'1,4,2,21,*,*,*', #'1,4,2,22,*,*,*', #'1,4,2,23,*,*,*', #'1,4,2,24,*,*,*', #'1,4,2,25,*,*,*', #'1,4,2,26,*,*,*', #'1,4,2,27,*,*,*', #'1,4,2,28,*,*,*', #'1,4,2,29,*,*,*', #'1,4,2,30,*,*,*', #'1,4,2,31,*,*,*', #'1,4,2,32,*,*,*', #'1,4,2,33,*,*,*', #'1,4,2,34,*,*,*', #'1,4,2,35,*,*,*', #'1,4,2,36,*,*,*', # == mask all ME-4/2 chambers #'2,4,2,*,*,*,*', )
22
57
0.334711
168
968
1.928571
0.363095
0.209877
0.287037
0.074074
0
0
0
0
0
0
0
0.201701
0.149793
968
43
58
22.511628
0.191981
0.764463
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
1
null
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
952ed6d84ab9007d29ee9db38bef13d4465845bd
152
py
Python
loop2.py
musaibnazir/MixedPy
b2911f06e2f99aba9fb5b6fa802471710196ba4b
[ "MIT" ]
null
null
null
loop2.py
musaibnazir/MixedPy
b2911f06e2f99aba9fb5b6fa802471710196ba4b
[ "MIT" ]
null
null
null
loop2.py
musaibnazir/MixedPy
b2911f06e2f99aba9fb5b6fa802471710196ba4b
[ "MIT" ]
null
null
null
num = 5 for i in range(0,num): for j in range(0,num-i-1): print(end=" ") for j in range(1,i+1): print(j," ",end="") print()
19
30
0.473684
29
152
2.482759
0.37931
0.291667
0.222222
0.305556
0
0
0
0
0
0
0
0.057692
0.315789
152
7
31
21.714286
0.634615
0
0
0
0
0
0.013158
0
0
0
0
0
0
1
0
false
0
0
0
0
0.428571
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
20fa196194e51243cbdb7f40115b79715cfb9074
139
py
Python
src/nitpick/style/__init__.py
jaysonsantos/nitpick
34d24993fed4de40c029d676a434761c19029860
[ "MIT" ]
null
null
null
src/nitpick/style/__init__.py
jaysonsantos/nitpick
34d24993fed4de40c029d676a434761c19029860
[ "MIT" ]
1
2021-03-30T09:40:53.000Z
2021-03-30T10:08:40.000Z
src/nitpick/style/__init__.py
jaysonsantos/nitpick
34d24993fed4de40c029d676a434761c19029860
[ "MIT" ]
null
null
null
"""Styles parsing and merging.""" from .cache import parse_cache_option from .core import Style __all__ = ("Style", "parse_cache_option")
23.166667
41
0.755396
19
139
5.105263
0.631579
0.206186
0.329897
0
0
0
0
0
0
0
0
0
0.122302
139
5
42
27.8
0.795082
0.194245
0
0
0
0
0.216981
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
1f5a6c2662f454cc1ac83a1c3606a8bd4b790ead
83
py
Python
app/ctr/__init__.py
ihong9059/flasky
c1cd3ef83f92fae165ec8794c0a2b7de23757f3d
[ "MIT" ]
null
null
null
app/ctr/__init__.py
ihong9059/flasky
c1cd3ef83f92fae165ec8794c0a2b7de23757f3d
[ "MIT" ]
null
null
null
app/ctr/__init__.py
ihong9059/flasky
c1cd3ef83f92fae165ec8794c0a2b7de23757f3d
[ "MIT" ]
null
null
null
from flask import Blueprint ctr = Blueprint('ctr', __name__) from . import views
13.833333
32
0.746988
11
83
5.272727
0.636364
0.413793
0
0
0
0
0
0
0
0
0
0
0.168675
83
5
33
16.6
0.84058
0
0
0
0
0
0.036145
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
2f5eec7c331b21462246f58d6ec8f2ef3293a0f4
42
py
Python
src/ODM2Sensor/settings/production.py
UCHIC/ODM2Sensor
488630f3a6535d201c652d9fbcfa1e8269253e0c
[ "BSD-3-Clause" ]
7
2015-04-11T19:27:25.000Z
2020-10-16T09:14:09.000Z
src/ODM2Sensor/settings/production.py
UCHIC/ODM2Sensor
488630f3a6535d201c652d9fbcfa1e8269253e0c
[ "BSD-3-Clause" ]
193
2015-04-13T22:30:40.000Z
2018-06-19T19:49:05.000Z
src/ODM2Sensor/settings/production.py
UCHIC/ODM2Sensor
488630f3a6535d201c652d9fbcfa1e8269253e0c
[ "BSD-3-Clause" ]
5
2016-03-22T18:57:23.000Z
2018-03-23T00:25:29.000Z
# TODO: write configuration for production
42
42
0.833333
5
42
7
1
0
0
0
0
0
0
0
0
0
0
0
0.119048
42
1
42
42
0.945946
0.952381
0
null
0
null
0
0
null
0
0
1
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
0
0
0
0
0
6
2f7dcf4c22b4d1c0c7d38301f88b9ef4e705040d
4,294
py
Python
python-lib/modellightgbm/dku_lightgbm.py
shippeo/dss-plugin-model-lightgbm
9c2bf2e010775501d7ff2ffdf25d1b51c01a0187
[ "MIT" ]
3
2021-06-15T16:02:38.000Z
2021-12-08T06:38:47.000Z
python-lib/modellightgbm/dku_lightgbm.py
shippeo/dss-plugin-model-lightgbm
9c2bf2e010775501d7ff2ffdf25d1b51c01a0187
[ "MIT" ]
null
null
null
python-lib/modellightgbm/dku_lightgbm.py
shippeo/dss-plugin-model-lightgbm
9c2bf2e010775501d7ff2ffdf25d1b51c01a0187
[ "MIT" ]
1
2021-06-15T16:06:02.000Z
2021-06-15T16:06:02.000Z
from lightgbm import LGBMClassifier, LGBMRegressor class DkuLGBMClassifier(LGBMClassifier): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, 
class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds)
97.590909
196
0.688402
557
4,294
4.901257
0.131059
0.066667
0.083516
0.024908
0.931868
0.924542
0.924542
0.924542
0.924542
0.924542
0
0.024677
0.226129
4,294
44
197
97.590909
0.79687
0
0
0.685714
0
0
0.007916
0
0
0
0
0
0
1
0.114286
false
0
0.142857
0.057143
0.371429
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2f7eea72e07f1252862fdc72b203bccf4d49ed00
6,181
py
Python
examples/pruning_two_instances.py
laudv/veritas
ba1761cc333b08b4381afa720b24ace065a9f106
[ "Apache-2.0" ]
6
2020-10-29T10:20:48.000Z
2022-03-31T13:39:47.000Z
examples/pruning_two_instances.py
laudv/veritas
ba1761cc333b08b4381afa720b24ace065a9f106
[ "Apache-2.0" ]
1
2021-11-25T13:15:11.000Z
2021-12-08T09:23:24.000Z
examples/pruning_two_instances.py
laudv/veritas
ba1761cc333b08b4381afa720b24ace065a9f106
[ "Apache-2.0" ]
null
null
null
import xgboost as xgb import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import veritas import veritas.xgb # Generate a random dataset np.random.seed(14) N = 2000 x = np.random.randint(0, 100, size=(N, 1)).astype(float) y = np.random.randint(0, 100, size=(N, 1)).astype(float) dist = np.sqrt(x**2 + y**2) s = x + y target = ((dist < 50) & (s > 20)) | ((x+2*y) > 200) # Plot the dataset #plt.plot(x[target], y[target], '.', color="blue") #plt.plot(x[~target], y[~target], '.', color="red") #plt.show() X = np.concatenate((x, y), axis=1) # Train a model using XGBoost xtrain = xgb.DMatrix(X, label=target, missing=None) params = { "learning_rate": 0.5, "max_depth": 4, "objective": "binary:hinge", "eval_metric": "error", "tree_method": "hist", "seed": 1, "nthread": 1, } bst = xgb.train(params, xtrain, 10, [(xtrain, "train")]) features = ["x", "y"] feat2id = {f : i for i, f in enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5 # Check whether our "AddTree"'s predictions and XGBoost's match pred_raw_at = np.array(at.predict(X)) pred_raw = bst.predict(xtrain, output_margin=True) print("max error", max(pred_raw_at - pred_raw), "(should be no more than float32 rounding error)") # Look in a 100×100 grid at the values produced by XGBoost Xv = np.zeros((100*100, 2)) for i, xv in enumerate(range(100)): for j, yv in enumerate(range(100)): Xv[i*100+j, 0:2] = [xv, yv] vs = bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(16, 6)) pred = (pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target], y[pred&target], '.', color="darkblue", alpha=0.5, label="true pos") ax0.plot(x[~pred&~target], y[~pred&~target], '.', color="darkred", alpha=0.5, label="true neg") ax0.plot(x[pred&~target], y[pred&~target], 'x', color="blue", label="false pos") ax0.plot(x[~pred&target], y[~pred&target], 'x', color="red", label="false neg") im = ax1.imshow(vs.reshape(100,100).T, origin="lower", 
cmap="Spectral") fig.colorbar(im, ax=ax1) plt.show() # EXAMPLE 1 # Use VERITAS to find the two output configurations # - one in box x: [25, 75], y: [50, 80] # - one in box x: [0, 50], y: [0, 50] # such that the difference in output is maximized opt = veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1 = [ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ] print("num reachable leafs before prune", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune instance0 (minimized) opt.prune_box(box1, 1) # prune instance1 (maximized) print("num reachable leafs after prune", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points = [] for sol in opt.solutions(): # convert Solution object to list of intervals indexes by feature id intervals = opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2 # instance0: middle of first feature interval yv0 = sum(intervals[0][1])/2 # instance0: middle of second feature interval xv1 = sum(intervals[1][0])/2 # instance1: middle of first feature interval yv1 = sum(intervals[1][1])/2 # instance1: middle of second feature interval points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1]) points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax = plt.subplots() m, M = abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin="lower", cmap="Spectral") ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color="blue")) ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color="red")) for p in points[:3]: # 3 best only l, = ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker="v", color=l.get_color()) # min ax.scatter([p[2]], [p[3]], marker="^", color=l.get_color()) # max fig.colorbar(im, ax=ax) plt.show() # 
EXAMPLE 2 # Use VERITAS to find the two output configurations # - one in box x: [25, 75], y: [50, 80] # - one in box x: [0, 50], y: [0, 50] # such that the difference in output is maximized # This time, share attribute x between the two instances opt = veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1 = [ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ] print("num reachable leafs before prune", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune instance0 (minimized) opt.prune_box(box1, 1) # prune instance1 (maximized) print("num reachable leafs after prune", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points = [] for sol in opt.solutions(): # convert Solution object to list of intervals indexes by feature id intervals = opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2 # instance0: middle of first feature interval yv0 = sum(intervals[0][1])/2 # instance0: middle of second feature interval xv1 = sum(intervals[1][0])/2 # instance1: middle of first feature interval yv1 = sum(intervals[1][1])/2 # instance1: middle of second feature interval points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1]) points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax = plt.subplots() m, M = abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin="lower", cmap="Spectral") ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color="blue")) ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color="red")) for p in points[:3]: # 3 best only l, = ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker="v", color=l.get_color()) # min ax.scatter([p[2]], [p[3]], marker="^", color=l.get_color()) # max fig.colorbar(im, ax=ax) plt.show()
36.791667
98
0.668986
1,019
6,181
4.002944
0.223749
0.007355
0.027458
0.011768
0.73572
0.727874
0.727874
0.715126
0.715126
0.672959
0
0.056439
0.145769
6,181
167
99
37.011976
0.715909
0.239605
0
0.567797
1
0
0.083942
0
0
0
0
0
0
1
0
false
0
0.050847
0
0.050847
0.076271
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
85cd1711c069b83c769da11d7c138d705bd98d3c
22,554
py
Python
Parabola/prop1_probs.py
pdcxs/ManimProjects
550a281e430a1a1568ae1978ccfe817bedcc9ef8
[ "WTFPL" ]
29
2019-12-09T13:57:37.000Z
2022-02-15T12:18:25.000Z
Parabola/prop1_probs.py
pdcxs/ManimProjects
550a281e430a1a1568ae1978ccfe817bedcc9ef8
[ "WTFPL" ]
1
2019-12-22T09:15:18.000Z
2019-12-23T02:16:43.000Z
Parabola/prop1_probs.py
pdcxs/ManimProjects
550a281e430a1a1568ae1978ccfe817bedcc9ef8
[ "WTFPL" ]
4
2020-04-16T12:50:09.000Z
2021-07-09T12:39:04.000Z
from manimlib.imports import * from ManimProjects.utils.Parabola import Parabola from ManimProjects.utils.geometry import CText class Prob1(Parabola): CONFIG = { 'x_min' : -5 } def construct(self): self.adjust_x_range() graph = self.get_graph(color=LIGHT_BROWN) directrix = self.get_directrix() focus = Dot().move_to(self.get_focus()) focus.set_fill(DARK_BROWN) focus.plot_depth = 1 focusLabel = TexMobject('F').scale(0.7) focusLabel.next_to(focus, RIGHT) self.play(*[ShowCreation(e) for\ e in [graph, directrix, focus, focusLabel]]) y_val = ValueTracker(8) p1 = Dot() p1.set_color(DARK_BLUE) p1.add_updater(lambda m:\ m.move_to(self.coords_to_point( self.func(y_val.get_value()), y_val.get_value() ))) p1.plot_depth = 1 p1Label = TexMobject('P_1').scale(0.7) p1Label.add_updater(lambda m:\ m.next_to(p1, RIGHT, buff=SMALL_BUFF)) p2 = Dot() p2.set_color(DARK_BLUE) p2.add_updater(lambda m:\ m.move_to(self.get_opposite(p1))) p2.plot_depth = 1 p2Label = TexMobject('P_2').scale(0.7) p2Label.add_updater(lambda m:\ m.next_to(p2, RIGHT, buff=SMALL_BUFF)) focus_chord = Line() focus_chord.add_updater(lambda m:\ m.put_start_and_end_on( p1.get_center(), self.get_opposite(p1) )) self.play(ShowCreation(p1), ShowCreation(p1Label)) self.play(ShowCreation(focus_chord)) self.play(ShowCreation(p2), ShowCreation(p2Label)) fc_def = CText('焦点弦') fc_def.move_to(focus_chord.get_center()) fc_def.shift(0.2 * RIGHT + 0.1 * DOWN) self.play(Write(fc_def)) self.wait(2) self.play(FadeOut(fc_def)) q_y = ValueTracker(2) q = Dot() q.set_fill(DARK_BLUE) q.plot_depth = 1 q.add_updater(lambda m:\ m.move_to(self.coords_to_point( self.func(q_y.get_value()), q_y.get_value() ))) qLabel = TexMobject('Q').scale(0.7) qLabel.add_updater(lambda m:\ m.next_to(q, LEFT, buff=SMALL_BUFF)) k1 = Dot() k1.set_fill(BLUE_E) k1.plot_depth = 1 k1.add_updater(lambda m:\ m.move_to(self.chord_to_directrix(p1, q))) k1Label = TexMobject('K_1').scale(0.7) k1Label.add_updater(lambda m:\ m.next_to(k1, LEFT, buff=SMALL_BUFF)) k2 = Dot() 
k2.set_fill(BLUE_E) k2.plot_depth = 1 k2.add_updater(lambda m:\ m.move_to(self.chord_to_directrix(p2, q))) k2Label = TexMobject('K_2').scale(0.7) k2Label.add_updater(lambda m:\ m.next_to(k2, LEFT, buff=SMALL_BUFF)) l1 = Line() l1.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p1, q), self.chord_to_directrix(p1, q) )) l2 = Line() l2.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p2, q), self.chord_to_directrix(p2, q) )) self.play(ShowCreation(q), ShowCreation(qLabel)) self.play(ShowCreation(l1), ShowCreation(l2)) self.play(*[ShowCreation(e) for e in [k1, k2, k1Label, k2Label]]) k1f = Line() k1f.add_updater(lambda m:\ m.put_start_and_end_on( k1.get_center(), focus.get_center() )) k2f = Line() k2f.add_updater(lambda m:\ m.put_start_and_end_on( k2.get_center(), focus.get_center() )) self.play(ShowCreation(k1f), ShowCreation(k2f)) self.wait(1) self.play(ApplyMethod(y_val.set_value, 5)) summary = TexMobject('K_1F \\perp K_2F').scale(2) summary.to_edge(RIGHT) self.wait(1) self.play(Write(summary)) self.wait(5) qf = Line() qf.add_updater(lambda m:\ m.put_start_and_end_on(q.get_center(), focus.get_center())) self.play(ShowCreation(qf)) self.wait(1) self.play(ApplyMethod(q_y.set_value, -1)) self.wait(1) self.play(ApplyMethod(y_val.set_value, 0.5)) self.wait(1) self.play(ApplyMethod(y_val.set_value, 3), ApplyMethod(q_y.set_value, 0.5)) self.wait(10) class Prob2(Parabola): CONFIG = { 'focus': 2, 'x_min': -4 } def construct(self): self.adjust_x_range() graph = self.get_graph(color=LIGHT_BROWN) directrix = self.get_directrix() focus = Dot().move_to(self.get_focus()) focus.set_fill(DARK_BROWN) focus.plot_depth = 1 focusLabel = TexMobject('F').scale(0.7) focusLabel.next_to(focus, RIGHT) self.play(*[ShowCreation(e) for\ e in [graph, directrix, focus, focusLabel]]) q1_y = ValueTracker(9) q1 = Dot() q1.set_fill(DARK_BLUE) q1.plot_depth = 1 q1.add_updater(lambda m:\ m.move_to(self.coords_to_point( self.func(q1_y.get_value()), q1_y.get_value() ))) q1_label = 
TexMobject('Q_1').scale(0.5) q1_label.add_updater(lambda m:\ m.next_to(q1, RIGHT, buff=SMALL_BUFF)) self.play(ShowCreation(q1), ShowCreation(q1_label)) q2 = Dot() q2.set_fill(DARK_BLUE) q2.plot_depth = 1 q2.add_updater(lambda m:\ m.move_to(self.get_opposite(q1))) q2_label = TexMobject('Q_2').scale(0.5) q2_label.add_updater(lambda m:\ m.next_to(q2, RIGHT, buff=SMALL_BUFF)) q1q2 = Line() q1q2.add_updater(lambda m:\ m.put_start_and_end_on( q1.get_center(), self.get_opposite(q1) )) self.play(*[ShowCreation(e) for e in\ [q2, q2_label, q1q2]]) p1_y = ValueTracker(2) p1 = Dot() p1.set_fill(DARK_BLUE) p1.plot_depth = 1 p1.add_updater(lambda m:\ m.move_to(self.coords_to_point( self.func(p1_y.get_value()), p1_y.get_value() ))) p1_label = TexMobject('P_1').scale(0.5) p1_label.add_updater(lambda m:\ m.next_to(p1, RIGHT, buff=SMALL_BUFF)) self.play(ShowCreation(p1), ShowCreation(p1_label)) p2 = Dot() p2.set_fill(DARK_BLUE) p2.plot_depth = 1 p2.add_updater(lambda m:\ m.move_to(self.get_opposite(p1))) p2_label = TexMobject('P_2').scale(0.5) p2_label.add_updater(lambda m:\ m.next_to(p2, RIGHT, buff=SMALL_BUFF)) p1p2 = Line() p1p2.add_updater(lambda m:\ m.put_start_and_end_on( p1.get_center(), self.get_opposite(p1) )) self.play(*[ShowCreation(e) for e in\ [p2, p2_label, p1p2]]) k1 = Dot() k1.set_fill(DARK_BROWN) k1.plot_depth = 1 k1.add_updater(lambda m:\ m.move_to(self.chord_to_directrix(p1, q1))) k1_label = TexMobject('K_1').scale(0.5) k1_label.add_updater(lambda m:\ m.next_to(k1, LEFT, buff=SMALL_BUFF)) p1q1 = Line() p1q1.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p1, q1), self.chord_to_directrix(p1, q1) )) p2q2 = Line() p2q2.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p2, q2), self.chord_to_directrix(p2, q2) )) self.play(*[ShowCreation(e) for e in \ [k1, k1_label, p1q1, p2q2]]) k2 = Dot() k2.set_fill(DARK_BROWN) k2.plot_depth = 1 k2.add_updater(lambda m:\ m.move_to(self.chord_to_directrix(p2, q1))) k2_label = TexMobject('K_2').scale(0.5) 
k2_label.add_updater(lambda m:\ m.next_to(k2, LEFT, buff=SMALL_BUFF)) p2q1 = Line() p2q1.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p2, q1), self.chord_to_directrix(p2, q1) )) p1q2 = Line() p1q2.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p1, q2), self.chord_to_directrix(p1, q2) )) self.play(*[ShowCreation(e) for e in \ [k2, k2_label, p2q1, p1q2]]) explain = CText('这些交点在准线上').scale(0.3) explain.to_edge(RIGHT) self.wait(2) self.play(Write(explain)) self.wait(5) self.play(ApplyMethod(q1_y.set_value, 0.5), ApplyMethod(p1_y.set_value, -3)) self.wait(3) self.play(ApplyMethod(q1_y.set_value, 3), ApplyMethod(p1_y.set_value, -9)) self.wait(10) class Prob3(Parabola): CONFIG = { 'focus': 2, 'x_min': -4 } def construct(self): self.adjust_x_range() graph = self.get_graph(color=LIGHT_BROWN) directrix = self.get_directrix() focus = Dot().move_to(self.get_focus()) focus.set_fill(DARK_BROWN) focus.plot_depth = 1 focusLabel = TexMobject('F').scale(0.7) focusLabel.next_to(focus, RIGHT) self.play(*[ShowCreation(e) for\ e in [graph, directrix, focus, focusLabel]]) q1_y = ValueTracker(9) q1 = Dot() q1.set_fill(DARK_BLUE) q1.plot_depth = 1 q1.add_updater(lambda m:\ m.move_to(self.coords_to_point( self.func(q1_y.get_value()), q1_y.get_value() ))) q1_label = TexMobject('Q_1').scale(0.5) q1_label.add_updater(lambda m:\ m.next_to(q1, RIGHT, buff=SMALL_BUFF)) self.play(ShowCreation(q1), ShowCreation(q1_label)) q2 = Dot() q2.set_fill(DARK_BLUE) q2.plot_depth = 1 q2.add_updater(lambda m:\ m.move_to(self.get_opposite(q1))) q2_label = TexMobject('Q_2').scale(0.5) q2_label.add_updater(lambda m:\ m.next_to(q2, RIGHT, buff=SMALL_BUFF)) q1q2 = Line() q1q2.add_updater(lambda m:\ m.put_start_and_end_on( q1.get_center(), self.get_opposite(q1) )) self.play(*[ShowCreation(e) for e in\ [q2, q2_label, q1q2]]) p1_y = ValueTracker(2) p1 = Dot() p1.set_fill(DARK_BLUE) p1.plot_depth = 1 p1.add_updater(lambda m:\ m.move_to(self.coords_to_point( self.func(p1_y.get_value()), 
p1_y.get_value() ))) p1_label = TexMobject('P_1').scale(0.5) p1_label.add_updater(lambda m:\ m.next_to(p1, RIGHT, buff=SMALL_BUFF)) self.play(ShowCreation(p1), ShowCreation(p1_label)) p2 = Dot() p2.set_fill(DARK_BLUE) p2.plot_depth = 1 p2.add_updater(lambda m:\ m.move_to(self.get_opposite(p1))) p2_label = TexMobject('P_2').scale(0.5) p2_label.add_updater(lambda m:\ m.next_to(p2, RIGHT, buff=SMALL_BUFF)) p1p2 = Line() p1p2.add_updater(lambda m:\ m.put_start_and_end_on( p1.get_center(), self.get_opposite(p1) )) self.play(*[ShowCreation(e) for e in\ [p2, p2_label, p1p2]]) k1 = Dot() k1.set_fill(DARK_BROWN) k1.plot_depth = 1 k1.add_updater(lambda m:\ m.move_to(self.chord_to_directrix(p1, q1))) k1_label = TexMobject('K_1').scale(0.5) k1_label.add_updater(lambda m:\ m.next_to(k1, LEFT, buff=SMALL_BUFF)) p1q1 = Line() p1q1.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p1, q1), self.chord_to_directrix(p1, q1) )) p2q2 = Line() p2q2.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p2, q2), self.chord_to_directrix(p2, q2) )) self.play(*[ShowCreation(e) for e in \ [k1, k1_label, p1q1, p2q2]]) k2 = Dot() k2.set_fill(DARK_BROWN) k2.plot_depth = 1 k2.add_updater(lambda m:\ m.move_to(self.chord_to_directrix(p2, q1))) k2_label = TexMobject('K_2').scale(0.5) k2_label.add_updater(lambda m:\ m.next_to(k2, LEFT, buff=SMALL_BUFF)) p2q1 = Line() p2q1.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p2, q1), self.chord_to_directrix(p2, q1) )) p1q2 = Line() p1q2.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p1, q2), self.chord_to_directrix(p1, q2) )) self.play(*[ShowCreation(e) for e in \ [k2, k2_label, p2q1, p1q2]]) k1f = Line() k1f.add_updater(lambda m:\ m.put_start_and_end_on( k1.get_center(), focus.get_center() )) k2f = Line() k2f.add_updater(lambda m:\ m.put_start_and_end_on( k2.get_center(), focus.get_center() )) explain = TexMobject('K_1F \\perp K_2F') explain.to_edge(RIGHT) self.wait(2) self.play(ShowCreation(k1f), ShowCreation(k2f)) 
self.wait(3) self.play(Write(explain)) self.wait(5) self.play(ApplyMethod(q1_y.set_value, 0.5), ApplyMethod(p1_y.set_value, -3)) self.wait(3) self.play(ApplyMethod(q1_y.set_value, 3), ApplyMethod(p1_y.set_value, -9)) self.wait(10) class Prob4(Parabola): CONFIG = { 'focus': 3, 'x_min': -10 } def construct(self): self.adjust_x_range() graph = self.get_graph(color=LIGHT_BROWN) directrix = self.get_directrix() focus = Dot().move_to(self.get_focus()) focus.set_fill(DARK_BROWN) focus.plot_depth = 1 focusLabel = TexMobject('F').scale(0.5) focusLabel.next_to(focus, RIGHT) self.play(*[ShowCreation(e) for\ e in [graph, directrix, focus, focusLabel]]) a = Dot() a.set_fill(DARK_BROWN) a.move_to(self.coords_to_point(0, 0)) a.plot_depth = 1 a_label = TexMobject('A').scale(0.5) a_label.next_to(a, RIGHT) self.play(*[ShowCreation(e) for e in [a, a_label]]) y_val = ValueTracker(8) m = Dot() m.set_fill(DARK_BLUE) m.plot_depth = 1 m.add_updater(lambda m:\ m.move_to(self.coords_to_point( -self.focus, y_val.get_value() ))) m_label = TexMobject('M').scale(0.5) m_label.add_updater(lambda l:\ l.next_to(m, LEFT)) p = Dot() p.set_fill(DARK_BLUE) p.plot_depth = 1 p.add_updater(lambda m:\ m.move_to(self.coords_to_point( self.func(y_val.get_value()), y_val.get_value() ))) p_label = TexMobject('P').scale(0.5) p_label.add_updater(lambda m:\ m.next_to(p, RIGHT)) self.play(*[ShowCreation(e) for e in\ [m, m_label, p, p_label]]) k = Dot() k.set_fill(DARK_BLUE) k.plot_depth = 1 k.add_updater(lambda m:\ m.move_to(self.chord_to_directrix( p, a ))) k_label = TexMobject('K').scale(0.5) k_label.add_updater(lambda m:\ m.next_to(k, LEFT)) pk = Line() pk.add_updater(lambda l:\ l.put_start_and_end_on( p.get_center(), self.chord_to_directrix(p, a) )) mp = Line() mp.add_updater(lambda l:\ l.put_start_and_end_on( m.get_center(), p.get_center() )) self.play(*[ShowCreation(e) for e in\ [k, k_label, pk, mp]]) kf = Line() kf.add_updater(lambda l:\ l.put_start_and_end_on( k.get_center(), focus.get_center() )) mf = 
Line() mf.add_updater(lambda l:\ l.put_start_and_end_on( m.get_center(), focus.get_center() )) self.play(ShowCreation(kf), ShowCreation(mf)) form = TexMobject('KF \\perp MF') form.scale(0.7) form.to_edge(RIGHT) self.play(Write(form)) af = DashedLine(a.get_center(), focus.get_center()) pf = DashedLine() def get_pf_extent(): vec = focus.get_center() - p.get_center() vec = normalize(vec) return focus.get_center() + 2 * vec pf.add_updater(lambda m:\ m.put_start_and_end_on( p.get_center(), get_pf_extent() )) self.play(ShowCreation(af), ShowCreation(pf)) self.wait(3) self.play(ApplyMethod(y_val.set_value, 2)) self.wait(3) self.play(ApplyMethod(y_val.set_value, -2)) self.wait(3) self.play(ApplyMethod(y_val.set_value, -8)) self.wait(10) class Prob5(Parabola): CONFIG = { 'focus': 3, 'x_min': -10 } def construct(self): self.adjust_x_range() graph = self.get_graph(color=LIGHT_BROWN) directrix = self.get_directrix() focus = Dot().move_to(self.get_focus()) focus.set_fill(DARK_BROWN) focus.plot_depth = 1 focusLabel = TexMobject('F').scale(0.5) focusLabel.next_to(focus, RIGHT + UP) self.play(*[ShowCreation(e) for\ e in [graph, directrix, focus, focusLabel]]) h_line = self.get_horizontal() x = Dot() x.set_fill(DARK_BROWN) x.plot_depth = 1 x.move_to(self.coords_to_point(-self.focus, 0)) x_label = TexMobject('X').scale(0.5) x_label.next_to(x, LEFT + UP) self.play(ShowCreation(h_line)) self.play(ShowCreation(x), ShowCreation(x_label)) y_val = ValueTracker(8) p = Dot() p.set_fill(DARK_BLUE) p.plot_depth = 1 p.add_updater(lambda m:\ m.move_to(self.coords_to_point( self.func(y_val.get_value()), y_val.get_value() ))) q = Dot() q.set_fill(DARK_BLUE) q.plot_depth = 1 q.add_updater(lambda m:\ m.move_to(self.coords_to_point( self.func(-y_val.get_value()), -y_val.get_value() ))) t = Dot() t.set_fill(DARK_BLUE) t.plot_depth = 1 t.add_updater(lambda m:\ m.move_to(self.coords_to_point( self.func(y_val.get_value()), 0 ))) p_label = TexMobject('P').scale(0.5) p_label.add_updater(lambda m:\ 
m.next_to(p, RIGHT)) q_label = TexMobject('Q').scale(0.5) q_label.add_updater(lambda m:\ m.next_to(q, RIGHT)) t_label = TexMobject('T').scale(0.5) t_label.add_updater(lambda m:\ m.next_to(t, RIGHT + UP)) pq = Line() pq.add_updater(lambda m:\ m.put_start_and_end_on( p.get_center(), self.coords_to_point( self.func(-y_val.get_value()), -y_val.get_value() ))) pt = Line() pt.add_updater(lambda m:\ m.put_start_and_end_on( p.get_center(), self.coords_to_point( self.func(y_val.get_value()), 0 ))) self.play(ShowCreation(p), ShowCreation(p_label)) self.play(ShowCreation(pt)) self.play(ShowCreation(t), ShowCreation(t_label)) label1 = CText('纵标线').scale(0.3)\ .next_to(pt, RIGHT) self.play(ShowCreation(label1)) self.wait() self.play(FadeOut(label1)) self.play(ShowCreation(pq)) self.remove(pt) self.play(ShowCreation(q), ShowCreation(q_label)) label2 = CText('双纵标线').scale(0.3)\ .next_to(t, RIGHT+DOWN) self.play(ShowCreation(label2)) self.wait() self.play(FadeOut(label2)) self.wait() inter = Dot() inter.set_fill(DARK_BLUE) inter.plot_depth = 1 inter.add_updater(lambda m:\ m.move_to( self.coords_to_point( 4*(self.focus**3)/(y_val.get_value()**2), 4*self.focus**2/y_val.get_value() ) if y_val.get_value() != 0 else self.coords_to_point(0, 0) )) inter_label = TexMobject("P'").scale(0.5) inter_label.add_updater(lambda m:\ m.next_to(inter, LEFT + UP, buff=SMALL_BUFF)) px = Line() px.add_updater(lambda m:\ m.put_start_and_end_on( self.right(p, inter), x.get_center() )) self.play(ShowCreation(px)) self.play(ShowCreation(inter), ShowCreation(inter_label)) self.wait() form = CText("P'Q经过焦点").shift(UP) form.scale(0.5) form.to_edge(RIGHT) self.play(Write(form)) interq = Line() interq.add_updater(lambda m:\ m.put_start_and_end_on( inter.get_center(), q.get_center() )) self.play(ShowCreation(interq)) self.wait(2) self.play(ApplyMethod(y_val.set_value, 4)) self.wait(2) self.play(ApplyMethod(y_val.set_value, -4)) self.wait(2) self.play(ApplyMethod(y_val.set_value, -9)) self.wait(2) 
self.play(ApplyMethod(y_val.set_value, 9)) self.wait(10)
29.027027
73
0.524652
2,958
22,554
3.756254
0.0524
0.069301
0.110881
0.110161
0.815768
0.767978
0.747727
0.745297
0.701737
0.694897
0
0.037219
0.349561
22,554
777
74
29.027027
0.720177
0
0
0.697205
0
0
0.007892
0
0
0
0
0
0
1
0.009317
false
0
0.004658
0
0.031056
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c8128a8dd438ad2d493df254ee45c6a1a57355fd
95,273
py
Python
cottonformation/res/batch.py
MacHu-GWU/cottonformation-project
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
[ "BSD-2-Clause" ]
5
2021-07-22T03:45:59.000Z
2021-12-17T21:07:14.000Z
cottonformation/res/batch.py
MacHu-GWU/cottonformation-project
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
[ "BSD-2-Clause" ]
1
2021-06-25T18:01:31.000Z
2021-06-25T18:01:31.000Z
cottonformation/res/batch.py
MacHu-GWU/cottonformation-project
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
[ "BSD-2-Clause" ]
2
2021-06-27T03:08:21.000Z
2021-06-28T22:15:51.000Z
# -*- coding: utf-8 -*- """ This module """ import attr import typing from ..core.model import ( Property, Resource, Tag, GetAtt, TypeHint, TypeCheck, ) from ..core.constant import AttrMeta #--- Property declaration --- @attr.s class PropJobDefinitionAuthorizationConfig(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.AuthorizationConfig" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html Property Document: - ``p_AccessPointId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-accesspointid - ``p_Iam``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-iam """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.AuthorizationConfig" p_AccessPointId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "AccessPointId"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-accesspointid""" p_Iam: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "Iam"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-iam""" @attr.s class PropJobDefinitionResourceRequirement(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.ResourceRequirement" Resource Document: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html Property Document: - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-type - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-value """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.ResourceRequirement" p_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "Type"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-type""" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "Value"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-value""" @attr.s class PropJobDefinitionEnvironment(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.Environment" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html Property Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-name - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-value """ AWS_OBJECT_TYPE = 
"AWS::Batch::JobDefinition.Environment" p_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "Name"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-name""" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "Value"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-value""" @attr.s class PropJobDefinitionVolumesHost(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.VolumesHost" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumeshost.html Property Document: - ``p_SourcePath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumeshost.html#cfn-batch-jobdefinition-volumeshost-sourcepath """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.VolumesHost" p_SourcePath: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "SourcePath"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumeshost.html#cfn-batch-jobdefinition-volumeshost-sourcepath""" @attr.s class PropJobQueueComputeEnvironmentOrder(Property): """ AWS Object Type = "AWS::Batch::JobQueue.ComputeEnvironmentOrder" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html Property Document: - ``rp_ComputeEnvironment``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-computeenvironment - ``rp_Order``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-order """ AWS_OBJECT_TYPE = "AWS::Batch::JobQueue.ComputeEnvironmentOrder" rp_ComputeEnvironment: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "ComputeEnvironment"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-computeenvironment""" rp_Order: int = attr.ib( default=None, validator=attr.validators.instance_of(int), metadata={AttrMeta.PROPERTY_NAME: "Order"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-order""" @attr.s class PropJobDefinitionSecret(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.Secret" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html Property Document: - ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-name - ``rp_ValueFrom``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-valuefrom """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Secret" rp_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "Name"}, ) """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-name""" rp_ValueFrom: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "ValueFrom"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-valuefrom""" @attr.s class PropJobDefinitionNetworkConfiguration(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.NetworkConfiguration" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-networkconfiguration.html Property Document: - ``p_AssignPublicIp``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-networkconfiguration.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration-assignpublicip """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.NetworkConfiguration" p_AssignPublicIp: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "AssignPublicIp"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-networkconfiguration.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration-assignpublicip""" @attr.s class PropJobDefinitionLogConfiguration(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.LogConfiguration" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html Property Document: - ``rp_LogDriver``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-logdriver - ``p_Options``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-options - ``p_SecretOptions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-secretoptions """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.LogConfiguration" rp_LogDriver: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "LogDriver"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-logdriver""" p_Options: dict = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(dict)), metadata={AttrMeta.PROPERTY_NAME: "Options"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-options""" p_SecretOptions: typing.List[typing.Union['PropJobDefinitionSecret', dict]] = attr.ib( default=None, converter=PropJobDefinitionSecret.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionSecret), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: "SecretOptions"}, ) """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-secretoptions""" @attr.s class PropComputeEnvironmentLaunchTemplateSpecification(Property): """ AWS Object Type = "AWS::Batch::ComputeEnvironment.LaunchTemplateSpecification" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html Property Document: - ``p_LaunchTemplateId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplateid - ``p_LaunchTemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplatename - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-version """ AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment.LaunchTemplateSpecification" p_LaunchTemplateId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateId"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplateid""" p_LaunchTemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateName"}, ) """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplatename""" p_Version: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "Version"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-version""" @attr.s class PropJobDefinitionMountPoints(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.MountPoints" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html Property Document: - ``p_ContainerPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-containerpath - ``p_ReadOnly``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-readonly - ``p_SourceVolume``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-sourcevolume """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.MountPoints" p_ContainerPath: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "ContainerPath"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-containerpath""" p_ReadOnly: bool = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: "ReadOnly"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-readonly""" p_SourceVolume: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "SourceVolume"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-sourcevolume""" @attr.s class PropSchedulingPolicyShareAttributes(Property): """ AWS Object Type = "AWS::Batch::SchedulingPolicy.ShareAttributes" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html Property Document: - ``p_ShareIdentifier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-shareidentifier - ``p_WeightFactor``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-weightfactor """ AWS_OBJECT_TYPE = "AWS::Batch::SchedulingPolicy.ShareAttributes" p_ShareIdentifier: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "ShareIdentifier"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-shareidentifier""" p_WeightFactor: float = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(float)), metadata={AttrMeta.PROPERTY_NAME: 
"WeightFactor"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-weightfactor""" @attr.s class PropJobDefinitionEvaluateOnExit(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.EvaluateOnExit" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html Property Document: - ``rp_Action``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-action - ``p_OnExitCode``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onexitcode - ``p_OnReason``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onreason - ``p_OnStatusReason``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onstatusreason """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.EvaluateOnExit" rp_Action: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "Action"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-action""" p_OnExitCode: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "OnExitCode"}, ) """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onexitcode""" p_OnReason: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "OnReason"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onreason""" p_OnStatusReason: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "OnStatusReason"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onstatusreason""" @attr.s class PropJobDefinitionUlimit(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.Ulimit" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html Property Document: - ``rp_HardLimit``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-hardlimit - ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-name - ``rp_SoftLimit``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-softlimit """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Ulimit" rp_HardLimit: int = attr.ib( default=None, validator=attr.validators.instance_of(int), metadata={AttrMeta.PROPERTY_NAME: "HardLimit"}, ) """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-hardlimit""" rp_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "Name"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-name""" rp_SoftLimit: int = attr.ib( default=None, validator=attr.validators.instance_of(int), metadata={AttrMeta.PROPERTY_NAME: "SoftLimit"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-softlimit""" @attr.s class PropJobDefinitionFargatePlatformConfiguration(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.FargatePlatformConfiguration" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-fargateplatformconfiguration.html Property Document: - ``p_PlatformVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-fargateplatformconfiguration.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration-platformversion """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.FargatePlatformConfiguration" p_PlatformVersion: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "PlatformVersion"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-fargateplatformconfiguration.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration-platformversion""" @attr.s class PropJobDefinitionTimeout(Property): """ AWS Object Type 
= "AWS::Batch::JobDefinition.Timeout" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-timeout.html Property Document: - ``p_AttemptDurationSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-timeout.html#cfn-batch-jobdefinition-timeout-attemptdurationseconds """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Timeout" p_AttemptDurationSeconds: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: "AttemptDurationSeconds"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-timeout.html#cfn-batch-jobdefinition-timeout-attemptdurationseconds""" @attr.s class PropJobDefinitionTmpfs(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.Tmpfs" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html Property Document: - ``rp_ContainerPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-containerpath - ``rp_Size``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-size - ``p_MountOptions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-mountoptions """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Tmpfs" rp_ContainerPath: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "ContainerPath"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-containerpath""" rp_Size: int = attr.ib( 
default=None, validator=attr.validators.instance_of(int), metadata={AttrMeta.PROPERTY_NAME: "Size"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-size""" p_MountOptions: typing.List[TypeHint.intrinsic_str] = attr.ib( default=None, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: "MountOptions"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-mountoptions""" @attr.s class PropJobDefinitionEfsVolumeConfiguration(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.EfsVolumeConfiguration" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html Property Document: - ``rp_FileSystemId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-filesystemid - ``p_AuthorizationConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-authorizationconfig - ``p_RootDirectory``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-rootdirectory - ``p_TransitEncryption``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryption - ``p_TransitEncryptionPort``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryptionport """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.EfsVolumeConfiguration" rp_FileSystemId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "FileSystemId"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-filesystemid""" p_AuthorizationConfig: typing.Union['PropJobDefinitionAuthorizationConfig', dict] = attr.ib( default=None, converter=PropJobDefinitionAuthorizationConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionAuthorizationConfig)), metadata={AttrMeta.PROPERTY_NAME: "AuthorizationConfig"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-authorizationconfig""" p_RootDirectory: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "RootDirectory"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-rootdirectory""" p_TransitEncryption: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "TransitEncryption"}, ) """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryption""" p_TransitEncryptionPort: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: "TransitEncryptionPort"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryptionport""" @attr.s class PropJobDefinitionDevice(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.Device" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html Property Document: - ``p_ContainerPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-containerpath - ``p_HostPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-hostpath - ``p_Permissions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-permissions """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Device" p_ContainerPath: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "ContainerPath"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-containerpath""" p_HostPath: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "HostPath"}, ) 
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-hostpath""" p_Permissions: typing.List[TypeHint.intrinsic_str] = attr.ib( default=None, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: "Permissions"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-permissions""" @attr.s class PropComputeEnvironmentEc2ConfigurationObject(Property): """ AWS Object Type = "AWS::Batch::ComputeEnvironment.Ec2ConfigurationObject" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html Property Document: - ``rp_ImageType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imagetype - ``p_ImageIdOverride``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imageidoverride """ AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment.Ec2ConfigurationObject" rp_ImageType: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "ImageType"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imagetype""" p_ImageIdOverride: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "ImageIdOverride"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imageidoverride""" @attr.s class PropJobDefinitionVolumes(Property): """ AWS Object Type = "AWS::Batch::JobDefinition.Volumes" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html Property Document: - ``p_EfsVolumeConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-efsvolumeconfiguration - ``p_Host``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-host - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-name """ AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Volumes" p_EfsVolumeConfiguration: typing.Union['PropJobDefinitionEfsVolumeConfiguration', dict] = attr.ib( default=None, converter=PropJobDefinitionEfsVolumeConfiguration.from_dict, validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionEfsVolumeConfiguration)), metadata={AttrMeta.PROPERTY_NAME: "EfsVolumeConfiguration"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-efsvolumeconfiguration""" p_Host: typing.Union['PropJobDefinitionVolumesHost', dict] = attr.ib( default=None, converter=PropJobDefinitionVolumesHost.from_dict, validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionVolumesHost)), metadata={AttrMeta.PROPERTY_NAME: "Host"}, ) """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-host""" p_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "Name"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-name""" @attr.s class PropSchedulingPolicyFairsharePolicy(Property): """ AWS Object Type = "AWS::Batch::SchedulingPolicy.FairsharePolicy" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html Property Document: - ``p_ComputeReservation``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-computereservation - ``p_ShareDecaySeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedecayseconds - ``p_ShareDistribution``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedistribution """ AWS_OBJECT_TYPE = "AWS::Batch::SchedulingPolicy.FairsharePolicy" p_ComputeReservation: float = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(float)), metadata={AttrMeta.PROPERTY_NAME: "ComputeReservation"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-computereservation""" p_ShareDecaySeconds: float = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(float)), 
metadata={AttrMeta.PROPERTY_NAME: "ShareDecaySeconds"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedecayseconds""" p_ShareDistribution: typing.List[typing.Union['PropSchedulingPolicyShareAttributes', dict]] = attr.ib( default=None, converter=PropSchedulingPolicyShareAttributes.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropSchedulingPolicyShareAttributes), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: "ShareDistribution"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedistribution""" @attr.s class PropComputeEnvironmentComputeResources(Property): """ AWS Object Type = "AWS::Batch::ComputeEnvironment.ComputeResources" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html Property Document: - ``rp_MaxvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-maxvcpus - ``rp_Subnets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-subnets - ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-type - ``p_AllocationStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-allocationstrategy - 
``p_BidPercentage``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-bidpercentage - ``p_DesiredvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-desiredvcpus - ``p_Ec2Configuration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2configuration - ``p_Ec2KeyPair``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2keypair - ``p_ImageId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-imageid - ``p_InstanceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancerole - ``p_InstanceTypes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancetypes - ``p_LaunchTemplate``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-launchtemplate - ``p_MinvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-minvcpus - ``p_PlacementGroup``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-placementgroup - ``p_SecurityGroupIds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-securitygroupids - ``p_SpotIamFleetRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-spotiamfleetrole - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-tags """ AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment.ComputeResources" rp_MaxvCpus: int = attr.ib( default=None, validator=attr.validators.instance_of(int), metadata={AttrMeta.PROPERTY_NAME: "MaxvCpus"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-maxvcpus""" rp_Subnets: typing.List[TypeHint.intrinsic_str] = attr.ib( default=None, validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)), metadata={AttrMeta.PROPERTY_NAME: "Subnets"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-subnets""" rp_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "Type"}, ) """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-type""" p_AllocationStrategy: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "AllocationStrategy"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-allocationstrategy""" p_BidPercentage: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: "BidPercentage"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-bidpercentage""" p_DesiredvCpus: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: "DesiredvCpus"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-desiredvcpus""" p_Ec2Configuration: typing.List[typing.Union['PropComputeEnvironmentEc2ConfigurationObject', dict]] = attr.ib( default=None, converter=PropComputeEnvironmentEc2ConfigurationObject.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropComputeEnvironmentEc2ConfigurationObject), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: "Ec2Configuration"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2configuration""" 
p_Ec2KeyPair: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "Ec2KeyPair"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2keypair""" p_ImageId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "ImageId"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-imageid""" p_InstanceRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "InstanceRole"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancerole""" p_InstanceTypes: typing.List[TypeHint.intrinsic_str] = attr.ib( default=None, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: "InstanceTypes"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancetypes""" p_LaunchTemplate: typing.Union['PropComputeEnvironmentLaunchTemplateSpecification', dict] = attr.ib( default=None, converter=PropComputeEnvironmentLaunchTemplateSpecification.from_dict, 
validator=attr.validators.optional(attr.validators.instance_of(PropComputeEnvironmentLaunchTemplateSpecification)), metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplate"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-launchtemplate""" p_MinvCpus: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: "MinvCpus"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-minvcpus""" p_PlacementGroup: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "PlacementGroup"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-placementgroup""" p_SecurityGroupIds: typing.List[TypeHint.intrinsic_str] = attr.ib( default=None, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: "SecurityGroupIds"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-securitygroupids""" p_SpotIamFleetRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "SpotIamFleetRole"}, ) """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-spotiamfleetrole"""
    # Free-form tag dict; only validated as a dict here — key/value shapes are not checked.
    p_Tags: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "Tags"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-tags"""


@attr.s
class PropJobDefinitionRetryStrategy(Property):
    """
    AWS Object Type = "AWS::Batch::JobDefinition.RetryStrategy"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html

    Property Document:

    - ``p_Attempts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-attempts
    - ``p_EvaluateOnExit``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-evaluateonexit
    """
    AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.RetryStrategy"

    # "p_" prefix = optional CloudFormation property: validator is wrapped in optional(), so None passes.
    p_Attempts: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "Attempts"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-attempts"""
    # from_list converter normalizes a list of plain dicts into Prop objects before validation runs.
    p_EvaluateOnExit: typing.List[typing.Union['PropJobDefinitionEvaluateOnExit', dict]] = attr.ib(
        default=None,
        converter=PropJobDefinitionEvaluateOnExit.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionEvaluateOnExit), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "EvaluateOnExit"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-evaluateonexit"""


@attr.s
class PropJobDefinitionLinuxParameters(Property):
    """
    AWS Object Type = "AWS::Batch::JobDefinition.LinuxParameters"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html

    Property Document:

    - ``p_Devices``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-devices
    - ``p_InitProcessEnabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-initprocessenabled
    - ``p_MaxSwap``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-maxswap
    - ``p_SharedMemorySize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-sharedmemorysize
    - ``p_Swappiness``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-swappiness
    - ``p_Tmpfs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-tmpfs
    """
    AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.LinuxParameters"

    p_Devices: typing.List[typing.Union['PropJobDefinitionDevice', dict]] = attr.ib(
        default=None,
        converter=PropJobDefinitionDevice.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionDevice), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Devices"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-devices"""
    p_InitProcessEnabled: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "InitProcessEnabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-initprocessenabled"""
    p_MaxSwap: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "MaxSwap"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-maxswap"""
    p_SharedMemorySize: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "SharedMemorySize"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-sharedmemorysize"""
    p_Swappiness: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "Swappiness"},
    )
    """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-swappiness"""
    p_Tmpfs: typing.List[typing.Union['PropJobDefinitionTmpfs', dict]] = attr.ib(
        default=None,
        converter=PropJobDefinitionTmpfs.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionTmpfs), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Tmpfs"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-tmpfs"""


@attr.s
class PropJobDefinitionContainerProperties(Property):
    """
    AWS Object Type = "AWS::Batch::JobDefinition.ContainerProperties"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html

    Property Document:

    - ``rp_Image``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-image
    - ``p_Command``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-command
    - ``p_Environment``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-environment
    - ``p_ExecutionRoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-executionrolearn
    - ``p_FargatePlatformConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration
    - ``p_InstanceType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-instancetype
    - ``p_JobRoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-jobrolearn
    - ``p_LinuxParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-linuxparameters
    - ``p_LogConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-logconfiguration
    - ``p_Memory``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-memory
    - ``p_MountPoints``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-mountpoints
    - ``p_NetworkConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration
    - ``p_Privileged``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-privileged
    - ``p_ReadonlyRootFilesystem``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-readonlyrootfilesystem
    - ``p_ResourceRequirements``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-resourcerequirements
    - ``p_Secrets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-secrets
    - ``p_Ulimits``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-ulimits
    - ``p_User``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-user
    - ``p_Vcpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-vcpus
    - ``p_Volumes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-volumes
    """
    AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.ContainerProperties"

    # "rp_" prefix = required CloudFormation property: validator is NOT wrapped in optional(),
    # so leaving it as the default None fails attrs validation at construction time.
    rp_Image: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Image"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-image"""
    p_Command: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Command"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-command"""
    # List-of-Prop fields use a from_list converter so callers may pass plain dicts.
    p_Environment: typing.List[typing.Union['PropJobDefinitionEnvironment', dict]] = attr.ib(
        default=None,
        converter=PropJobDefinitionEnvironment.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionEnvironment), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Environment"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-environment"""
    p_ExecutionRoleArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ExecutionRoleArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-executionrolearn"""
    # Nested single-Prop fields use a from_dict converter so callers may pass a plain dict.
    p_FargatePlatformConfiguration: typing.Union['PropJobDefinitionFargatePlatformConfiguration', dict] = attr.ib(
        default=None,
        converter=PropJobDefinitionFargatePlatformConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionFargatePlatformConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "FargatePlatformConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration"""
    p_InstanceType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "InstanceType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-instancetype"""
    p_JobRoleArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "JobRoleArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-jobrolearn"""
    p_LinuxParameters: typing.Union['PropJobDefinitionLinuxParameters', dict] = attr.ib(
        default=None,
        converter=PropJobDefinitionLinuxParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionLinuxParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "LinuxParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-linuxparameters"""
    p_LogConfiguration: typing.Union['PropJobDefinitionLogConfiguration', dict] = attr.ib(
        default=None,
        converter=PropJobDefinitionLogConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionLogConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "LogConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-logconfiguration"""
    p_Memory: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "Memory"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-memory"""
    p_MountPoints: typing.List[typing.Union['PropJobDefinitionMountPoints', dict]] = attr.ib(
        default=None,
        converter=PropJobDefinitionMountPoints.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionMountPoints), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "MountPoints"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-mountpoints"""
    p_NetworkConfiguration: typing.Union['PropJobDefinitionNetworkConfiguration', dict] = attr.ib(
        default=None,
        converter=PropJobDefinitionNetworkConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionNetworkConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "NetworkConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration"""
    p_Privileged: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "Privileged"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-privileged"""
    p_ReadonlyRootFilesystem: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "ReadonlyRootFilesystem"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-readonlyrootfilesystem"""
    p_ResourceRequirements: typing.List[typing.Union['PropJobDefinitionResourceRequirement', dict]] = attr.ib(
        default=None,
        converter=PropJobDefinitionResourceRequirement.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionResourceRequirement), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "ResourceRequirements"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-resourcerequirements"""
    p_Secrets: typing.List[typing.Union['PropJobDefinitionSecret', dict]] = attr.ib(
        default=None,
        converter=PropJobDefinitionSecret.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionSecret), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Secrets"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-secrets"""
    p_Ulimits: typing.List[typing.Union['PropJobDefinitionUlimit', dict]] = attr.ib(
        default=None,
        converter=PropJobDefinitionUlimit.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionUlimit), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Ulimits"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-ulimits"""
    p_User: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "User"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-user"""
    p_Vcpus: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "Vcpus"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-vcpus"""
    p_Volumes: typing.List[typing.Union['PropJobDefinitionVolumes', dict]] = attr.ib(
        default=None,
        converter=PropJobDefinitionVolumes.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionVolumes), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Volumes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-volumes"""


@attr.s
class PropJobDefinitionNodeRangeProperty(Property):
    """
    AWS Object Type = "AWS::Batch::JobDefinition.NodeRangeProperty"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html

    Property Document:

    - ``rp_TargetNodes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-targetnodes
    - ``p_Container``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-container
    """
    AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.NodeRangeProperty"
    # Required ("rp_" prefix): validator is not wrapped in optional(), so None fails validation.
    rp_TargetNodes: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "TargetNodes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-targetnodes"""
    # from_dict converter lets callers pass a plain dict in place of the Prop object.
    p_Container: typing.Union['PropJobDefinitionContainerProperties', dict] = attr.ib(
        default=None,
        converter=PropJobDefinitionContainerProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionContainerProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "Container"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-container"""


@attr.s
class PropJobDefinitionNodeProperties(Property):
    """
    AWS Object Type = "AWS::Batch::JobDefinition.NodeProperties"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html

    Property Document:

    - ``rp_MainNode``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-mainnode
    - ``rp_NodeRangeProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-noderangeproperties
    - ``rp_NumNodes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-numnodes
    """
    AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.NodeProperties"

    # All three properties here are required ("rp_" prefix): validators are not optional().
    rp_MainNode: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "MainNode"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-mainnode"""
    rp_NodeRangeProperties: typing.List[typing.Union['PropJobDefinitionNodeRangeProperty', dict]] = attr.ib(
        default=None,
        converter=PropJobDefinitionNodeRangeProperty.from_list,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionNodeRangeProperty), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "NodeRangeProperties"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-noderangeproperties"""
    rp_NumNodes: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "NumNodes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-numnodes"""


#--- Resource declaration ---
@attr.s
class JobQueue(Resource):
    """
    AWS Object Type = "AWS::Batch::JobQueue"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html

    Property Document:

    - ``rp_ComputeEnvironmentOrder``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-computeenvironmentorder
    - ``rp_Priority``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-priority
    - ``p_JobQueueName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-jobqueuename
    - ``p_SchedulingPolicyArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-schedulingpolicyarn
    - ``p_State``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-state
    - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-tags
    """
    AWS_OBJECT_TYPE = "AWS::Batch::JobQueue"

    # Required: list of ComputeEnvironmentOrder entries; from_list accepts plain dicts.
    rp_ComputeEnvironmentOrder: typing.List[typing.Union['PropJobQueueComputeEnvironmentOrder', dict]] = attr.ib(
        default=None,
        converter=PropJobQueueComputeEnvironmentOrder.from_list,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobQueueComputeEnvironmentOrder), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "ComputeEnvironmentOrder"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-computeenvironmentorder"""
    rp_Priority: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "Priority"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-priority"""
    p_JobQueueName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "JobQueueName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-jobqueuename"""
    p_SchedulingPolicyArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "SchedulingPolicyArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-schedulingpolicyarn"""
    p_State: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "State"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-state"""
    # Free-form tag dict; only validated as a dict.
    p_Tags: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "Tags"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-tags"""


@attr.s
class JobDefinition(Resource):
    """
    AWS Object Type = "AWS::Batch::JobDefinition"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html

    Property Document:

    - ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-type
    - ``p_ContainerProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-containerproperties
    - ``p_JobDefinitionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-jobdefinitionname
    - ``p_NodeProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-nodeproperties
    - ``p_Parameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-parameters
    - ``p_PlatformCapabilities``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-platformcapabilities
    - ``p_PropagateTags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-propagatetags
    - ``p_RetryStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-retrystrategy
    - ``p_SchedulingPriority``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-schedulingpriority
    - ``p_Timeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-timeout
    - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-tags
    """
    AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition"

    # Required ("rp_" prefix): validator is not wrapped in optional().
    rp_Type: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Type"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-type"""
    # Nested Prop fields accept a plain dict via the from_dict converter.
    p_ContainerProperties: typing.Union['PropJobDefinitionContainerProperties', dict] = attr.ib(
        default=None,
        converter=PropJobDefinitionContainerProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionContainerProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "ContainerProperties"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-containerproperties"""
    p_JobDefinitionName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "JobDefinitionName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-jobdefinitionname"""
    p_NodeProperties: typing.Union['PropJobDefinitionNodeProperties', dict] = attr.ib(
        default=None,
        converter=PropJobDefinitionNodeProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionNodeProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "NodeProperties"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-nodeproperties"""
    p_Parameters: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "Parameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-parameters"""
    p_PlatformCapabilities: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "PlatformCapabilities"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-platformcapabilities"""
    p_PropagateTags: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "PropagateTags"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-propagatetags"""
    p_RetryStrategy: typing.Union['PropJobDefinitionRetryStrategy', dict] = attr.ib(
        default=None,
        converter=PropJobDefinitionRetryStrategy.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionRetryStrategy)),
        metadata={AttrMeta.PROPERTY_NAME: "RetryStrategy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-retrystrategy"""
    p_SchedulingPriority: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "SchedulingPriority"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-schedulingpriority"""
    p_Timeout: typing.Union['PropJobDefinitionTimeout', dict] = attr.ib(
        default=None,
        converter=PropJobDefinitionTimeout.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionTimeout)),
        metadata={AttrMeta.PROPERTY_NAME: "Timeout"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-timeout"""
    p_Tags: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "Tags"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-tags"""


@attr.s
class SchedulingPolicy(Resource):
    """
    AWS Object Type = "AWS::Batch::SchedulingPolicy"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html

    Property Document:

    - ``p_FairsharePolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy
    - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-name
    - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-tags
    """
    AWS_OBJECT_TYPE = "AWS::Batch::SchedulingPolicy"

    p_FairsharePolicy: typing.Union['PropSchedulingPolicyFairsharePolicy', dict] = attr.ib(
        default=None,
        converter=PropSchedulingPolicyFairsharePolicy.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropSchedulingPolicyFairsharePolicy)), metadata={AttrMeta.PROPERTY_NAME: "FairsharePolicy"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy""" p_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "Name"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-name""" p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib( default=None, validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))), metadata={AttrMeta.PROPERTY_NAME: "Tags"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-tags""" @property def rv_Arn(self) -> GetAtt: """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#aws-resource-batch-schedulingpolicy-return-values""" return GetAtt(resource=self, attr_name="Arn") @attr.s class ComputeEnvironment(Resource): """ AWS Object Type = "AWS::Batch::ComputeEnvironment" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html Property Document: - ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-type - ``p_ComputeEnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeenvironmentname - ``p_ComputeResources``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeresources - ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-servicerole - ``p_State``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-state - ``p_UnmanagedvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-unmanagedvcpus - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-tags """ AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment" rp_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: "Type"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-type""" p_ComputeEnvironmentName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "ComputeEnvironmentName"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeenvironmentname""" p_ComputeResources: typing.Union['PropComputeEnvironmentComputeResources', dict] = attr.ib( default=None, converter=PropComputeEnvironmentComputeResources.from_dict, validator=attr.validators.optional(attr.validators.instance_of(PropComputeEnvironmentComputeResources)), metadata={AttrMeta.PROPERTY_NAME: "ComputeResources"}, ) """Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeresources""" p_ServiceRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "ServiceRole"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-servicerole""" p_State: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: "State"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-state""" p_UnmanagedvCpus: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: "UnmanagedvCpus"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-unmanagedvcpus""" p_Tags: dict = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(dict)), metadata={AttrMeta.PROPERTY_NAME: "Tags"}, ) """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-tags"""
69.84824
268
0.779581
9,851
95,273
7.449802
0.022333
0.097618
0.043168
0.066714
0.901932
0.901483
0.887461
0.847632
0.847632
0.847632
0
0.000302
0.094917
95,273
1,363
269
69.899486
0.850771
0.331217
0
0.392996
0
0
0.091891
0.058525
0
0
0
0
0
1
0.001297
false
0
0.005188
0
0.254215
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c8454b6a7b2868452100a1d7f2609db3537a2db9
149
py
Python
treecompare/__init__.py
mx-pycoder/treecompare
784676039a6961366c92d131bc5496e10140200b
[ "MIT" ]
null
null
null
treecompare/__init__.py
mx-pycoder/treecompare
784676039a6961366c92d131bc5496e10140200b
[ "MIT" ]
null
null
null
treecompare/__init__.py
mx-pycoder/treecompare
784676039a6961366c92d131bc5496e10140200b
[ "MIT" ]
null
null
null
# API from ._treecompare import namecomp from ._treecompare import treedups from ._treecompare import treepurge from ._treecompare import duplicate
21.285714
35
0.838926
17
149
7.117647
0.470588
0.495868
0.694215
0
0
0
0
0
0
0
0
0
0.127517
149
6
36
24.833333
0.930769
0.020134
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
c0b39ad9484f5f8d4a92121a2e472c16c9f3a7e6
49
py
Python
raspberrypi/python/python1_test.py
dambergn/programing-examples
d1086047caa52c7cc6d2e7877cbbbebd2a1cbee0
[ "MIT" ]
null
null
null
raspberrypi/python/python1_test.py
dambergn/programing-examples
d1086047caa52c7cc6d2e7877cbbbebd2a1cbee0
[ "MIT" ]
null
null
null
raspberrypi/python/python1_test.py
dambergn/programing-examples
d1086047caa52c7cc6d2e7877cbbbebd2a1cbee0
[ "MIT" ]
null
null
null
#!/usr/bin/python print 'Python1 Test Sucessfull'
24.5
31
0.77551
7
49
5.428571
1
0
0
0
0
0
0
0
0
0
0
0.022222
0.081633
49
2
31
24.5
0.822222
0.326531
0
0
0
0
0.69697
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
6
c0b4119e86cb14bc5067185a43c030a53cc5f2a5
26
py
Python
Python/pyworkout/modules_and_packages/menu/__init__.py
honchardev/Fun
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
[ "MIT" ]
null
null
null
Python/pyworkout/modules_and_packages/menu/__init__.py
honchardev/Fun
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
[ "MIT" ]
3
2020-03-24T16:26:35.000Z
2020-04-15T19:40:41.000Z
Python/pyworkout/modules_and_packages/menu/__init__.py
honchardev/Fun
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
[ "MIT" ]
null
null
null
from menu.menu import menu
26
26
0.846154
5
26
4.4
0.6
0
0
0
0
0
0
0
0
0
0
0
0.115385
26
1
26
26
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c0c3a69cdf407b5a520a7aaf6817c943a7e8ec5f
120
py
Python
tensortrade/base/__init__.py
bwcknr/tensortrade
376f5e4cc4ad7df271774088884fbe88f8feb7d8
[ "Apache-2.0" ]
34
2020-06-05T22:39:53.000Z
2022-01-09T03:09:12.000Z
tensortrade/base/__init__.py
bwcknr/tensortrade
376f5e4cc4ad7df271774088884fbe88f8feb7d8
[ "Apache-2.0" ]
4
2020-11-13T18:48:52.000Z
2022-02-10T01:29:47.000Z
tensortrade/base/__init__.py
bwcknr/tensortrade
376f5e4cc4ad7df271774088884fbe88f8feb7d8
[ "Apache-2.0" ]
8
2020-06-01T12:09:53.000Z
2022-01-18T14:45:29.000Z
from .clock import Clock from .component import * from .context import * from .core import * from .exceptions import *
17.142857
25
0.75
16
120
5.625
0.4375
0.333333
0
0
0
0
0
0
0
0
0
0
0.175
120
6
26
20
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8d0cdb46fef6be7ec32f19ff100a24890613d302
4,205
py
Python
tests/test_htmlreflector.py
christabor/codeReflector
21c38ebaa6a418402b9bf97cc1d1a140b10d38e6
[ "Apache-2.0" ]
3
2015-07-12T04:41:36.000Z
2015-09-18T02:28:35.000Z
tests/test_htmlreflector.py
christabor/codeReflector
21c38ebaa6a418402b9bf97cc1d1a140b10d38e6
[ "Apache-2.0" ]
null
null
null
tests/test_htmlreflector.py
christabor/codeReflector
21c38ebaa6a418402b9bf97cc1d1a140b10d38e6
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- __author__ = """Chris Tabor (dxdstudio@gmail.com)""" import unittest from code_reflector import html_reflector class SelectorOutputTestCase(unittest.TestCase): def setUp(self): self.ref = html_reflector.HTMLReflector() def test_single_class(self): res = self.ref.process_string('.foo {}').extract().make_html( save_as_string=True) self.assertEqual(res, '<div class="foo"></div>') def test_single_id(self): res = self.ref.process_string('#foo {}').extract().make_html( save_as_string=True) self.assertEqual(res, '<div id="foo"></div>') def test_pseudoselector(self): res = self.ref.process_string('#foo:hover {}').extract().make_html( save_as_string=True) self.assertEqual(res, '') def test_pseudoselector_mixed(self): res = self.ref.process_string( '#foo:hover {} #bar {}').extract().make_html( save_as_string=True) self.assertEqual(res, '<div id="bar"></div>') def test_nested_id(self): res = self.ref.process_string('#foo #bar #bim {}').extract().make_html( save_as_string=True) expected = ('<div id="foo"><div id="bar"><div id="bim">' '</div></div></div>') self.assertEqual(res, expected) def test_nested_class(self): res = self.ref.process_string('.foo .bar .bim {}').extract().make_html( save_as_string=True) expected = ('<div class="foo"><div class="bar"><div class="bim">' '</div></div></div>') self.assertEqual(res, expected) def test_compound_class_id(self): res = self.ref.process_string('.foo#bar {}').extract().make_html( save_as_string=True) expected = ('<div id="bar" class="foo"></div>') self.assertEqual(res, expected) def test_compound_multiclass(self): res = self.ref.process_string('.foo.bar.bim {}').extract().make_html( save_as_string=True) expected = ('<div class="foo bar bim"></div>') self.assertEqual(res, expected) def test_compound_id_multiclass(self): res = self.ref.process_string('#foo.bar.bim {}').extract().make_html( save_as_string=True) expected = ('<div id="foo" class="bar bim"></div>') self.assertEqual(res, expected) def 
test_compound_id_class(self): res = self.ref.process_string('#foo.bar {}').extract().make_html( save_as_string=True) expected = ('<div id="foo" class="bar"></div>') self.assertEqual(res, expected) def test_nested_simple_class(self): res = self.ref.process_string('.foo>.bar {}').extract().make_html( save_as_string=True) expected = ('<div class="foo"><div class="bar"></div></div>') self.assertEqual(res, expected) def test_nested_simple_id(self): res = self.ref.process_string('#foo>#bar {}').extract().make_html( save_as_string=True) expected = ('<div id="foo"><div id="bar"></div></div>') self.assertEqual(res, expected) def test_nested_simple_id_spaces(self): res = self.ref.process_string('#foo > #bar {}').extract().make_html( save_as_string=True) expected = ('<div id="foo"><div id="bar"></div></div>') self.assertEqual(res, expected) def test_nested_multiid_multiclass_tag(self): res = self.ref.process_string( '.foo > .bar > section#bam section.quux {}').extract().make_html( save_as_string=True) expected = ('<div class="foo"><div class="bar"><section id="bam">' '<section class="quux"></section></section></div></div>') self.assertEqual(res, expected) def test_nested_multiid_multiclass_tag_mixedspaces(self): res = self.ref.process_string( '.foo > .bar>section#bam section.quux {}').extract().make_html( save_as_string=True) expected = ('<div class="foo"><div class="bar"><section id="bam">' '<section class="quux"></section></section></div></div>') self.assertEqual(res, expected)
39.669811
79
0.597146
520
4,205
4.621154
0.103846
0.046608
0.068664
0.087391
0.862672
0.862672
0.862672
0.862672
0.80774
0.782355
0
0.00031
0.231867
4,205
105
80
40.047619
0.743653
0.004994
0
0.511905
0
0
0.226208
0.042324
0
0
0
0
0.178571
1
0.190476
false
0
0.02381
0
0.22619
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
23e314e87d1ce2b38f17668f757e05eb0bc939b5
529
py
Python
running_modes/enums/__init__.py
marco-foscato/Lib-INVENT
fe6a65ab7165abd87b25752a6b4208c8703d11f7
[ "Apache-2.0" ]
26
2021-04-30T23:21:17.000Z
2022-03-10T06:33:11.000Z
running_modes/enums/__init__.py
marco-foscato/Lib-INVENT
fe6a65ab7165abd87b25752a6b4208c8703d11f7
[ "Apache-2.0" ]
6
2021-10-03T08:35:48.000Z
2022-03-24T09:57:39.000Z
running_modes/enums/__init__.py
marco-foscato/Lib-INVENT
fe6a65ab7165abd87b25752a6b4208c8703d11f7
[ "Apache-2.0" ]
10
2021-04-28T14:08:17.000Z
2022-03-04T04:18:13.000Z
from running_modes.enums.diversity_filter_enum import DiversityFilterEnum from running_modes.enums.learning_strategy_enum import LearningStrategyEnum from running_modes.enums.logging_mode_enum import LoggingModeEnum from running_modes.enums.running_mode_enum import RunningModeEnum from running_modes.enums.generative_model_regime import GenerativeModelRegimeEnum from running_modes.enums.generative_model_parameters import GenerativeModelParametersEnum from running_modes.enums.scoring_strategy_enum import ScoringStrategyEnum
58.777778
89
0.918715
63
529
7.380952
0.380952
0.165591
0.24086
0.316129
0.154839
0.154839
0
0
0
0
0
0
0.05482
529
8
90
66.125
0.93
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
23fa09c32208a388fee77f1efd42018f79e4deb4
9,971
py
Python
wechat.py
TANG617/2021-C-Homework
2056ae75c927ad1b5f96ef9c60e5f81af5910213
[ "MIT" ]
null
null
null
wechat.py
TANG617/2021-C-Homework
2056ae75c927ad1b5f96ef9c60e5f81af5910213
[ "MIT" ]
null
null
null
wechat.py
TANG617/2021-C-Homework
2056ae75c927ad1b5f96ef9c60e5f81af5910213
[ "MIT" ]
null
null
null
data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAEYANIDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD0OilopFiUUuKMUAJSgUUoAoASlA96UCloEIKUdaKKAsLRQKUCgQCiilFACUUtFAgFKKAKXFMAFKBQBRjmgApRRSgUAA604UlKKACmtTwKQigCKilxRQBXpRRgU4DikaCCkIp1GKAG4pQKXFGKAAUUoFFAmJRS4o7/AEoFYBRTd42k5HvzTWuYU4Z1HGetLmQWZKKKiFxGxAV1JPTBpWlVRnHFO4iSlFNVlOCKkAzQIQdaWlApQKYCDrS4pQKUCgBAKKXFKB2oAQdacBRjFOA4oAQU00/HFIRQBHiinYooAq4pQKUiikaiYpQKUClAoENxRTsUY/OgBtIzBR1x9agv9QtNMtzcXtxFBEP4pGAry3xp8T7A2klppc7SSONvmQtwvvmolJ7RKUb6s67xF4+0jw6ALh2eQ9EiAJ/nXnOu/GS4nJTSrYxoRjdLwR+FeXXFxLcStJLI7sTkljkmoDTUe5Ln2NmbxRrU87TNqVyGY5O2QgflVy38aavHEY5buZ+6sHwQa5qlqnCL6EczN0+MNcYf8f0m4dGB5Fa9v8TvEVvHCpuTII87tx+97VxeaKXKg5mewaF8XsFY9SjCKRjKdjXa2HxN0C8ljiFwqOR/GwAr5qop2C59jWV1HeQLNFIroe6nOKtAV8zeBviBceF5Tb3LSS2D8lQfmU+1e4eHfHmieIIA8F5HHJnaYnOG+tAHVAUoFRwzJOu+Nww65B61OBTENApcU4ClAoAaBSgUuKMUAJikIp2KMUANxRTsUUAVCtG2n4oxSNRoFGKdijFACAVHM4jQsegGeKlOQOBn26V5V8Q/H1/osxtrWARyZ4duQaiTeyKUU9Wcl8TNVudTnkEMYS1jOGyeSa8yJrQ1XWbzWLgzXcgZyT0GBWeaqCsjOTuxKKKKogKKKKACiiigAooooAKkhmkgcSRSMjjoynBFR0UAeqeBfivcaZPHZ6yfNtThRN0Ke59q9+tLiK8to7iCRZYpF3KynIIr4uBIHHBr0/4Z/ES60rUItL1C4Q2Ux2q0uf3Z7c54Bp7j3PonFKBTLdvMiV8g7hnI6VMBSEMxSgU7FKBQAzFGOKkxSEUAR4oqTFFAFIiin4oxSNRoFFKRSgUDsNIyOa8X+MGtaakQ05LeOS7YZ3k8o
Pwr1jXr9NK0S7vZHVFijJ3OeM18l6rfy6lqU91K7O0jEgn0rNrmlYJNJFI9aKO9PjiaQ8DNat23Mkm3ZDKlht5J2ARSTWpZaO07jcOPSuw07Q1RVASuWti4w2O6hgZ1Hd7HFLodwwyRUb6RcIcba9Xi0ZQOVFRTaDGzZ21zRxze52PLo20PJJLSaPlkOPWodjZxivT7nw/n/lnWTceGyWzs4+lbRxcWc08BJbHDbGz0NOMLjAKnmu1GgBWX5entViXQA+10XoOar61En6hM8/KkdRSYrs7rRFCn5f0rAu9LeLlQee1awrxkY1MLOBlmlHGDnHvQ6lWIPUUVscux9MfCDWbjWPCSmeXzXt3MZyefb9MV6Lj3rwn4EhRql6IpXAMJ3IScMfavesU2rCGAUuKdilApDGgUYp2KMUANxRTsUUAU8UYp2KMUGw3FGKcBRjijoM8s+NmqxWfh63sdpeW6c8Z6Ad6+ejXs/wAeljGo6W3mAyeUQU3dB64rxg9aiBE2TWsDXE6xqM5rq7TRgoXjNUfDFp5srPjpXdwW6hRgVwYvEOMuVHq4HCxcOeRWstNEWPl5rorO2wV7YqGCA4zitS2XbxivInJyd2e1GKSLAhBAp/2demBVhEyBxQY27U0S0VWs1f8Ah4qN9NjYfdFaKgqKacuelWnYXKjM/smL+6B+FB0tNuMVqKh4qcR8ZNKUmwsjmLjSEZDxWDfaCG5C5wc13siL/dzVCaJc9KI1JRehMqcZbnifiHSms594XAPWsEmvXvFelpPYvIF5UV5NLGVkK4717eEr+0jqeBjsP7Od0en/AAR1P7L4oWzY/LOCB9SP/rV9HgcCvlz4TIw8a2uCodPmAZsZGQOnevqQdBXbLZHniYpQKUUYqRiYoxThSEUAJiilxRQBUApcUuKMUG4mKMe1OAoxStoM8L+O1i4vbG9CgIUKEk8k+3514v1P619I/GvSReeFYb7bn7G5/wDHuK+dbaLzrhIwPvECoi7J3JcXKSR23hW022ivjrzmumeZLbgkFj0FV9MtPs9qigdAKtQacHkaWQZYngV487TqNs+jpxcaaihkOsSxHJTK1oW3iS3L7ZUKD+9VmDSYH4ZV/Gi60G3ER2qM1H7uxajURuWV5b3KZjbnvmribDxXF2ytZyfK2AtbNvqBOATWMmlsaJSe5uPGmM5oRY+5qqJ90fHpVY3mxsNxQpFKOhqhIyeuac0kKKdxA+prClu3IOxvxrOeCe5k5kJH1q9GZtPodDJd2vIDg1RaaO4chGzgVXi0SRhuViMU6TTZYkJHDetDpq2hnzSTM7WI9+nTY9K8evoP9JbHBzXtSq08LRSL8xyK8m12D7JqkkZ7E/lXVgHabicWYx5oKR2vwg09p/ESyvCJFhXO7HQ19EqOBXlvwZ01odHnvJEwZSVVsdQK9Ur2p6WPBW4mKUClFFSMTFGKcBRigBuKKdiigCnSilxRig6BMUooo7e9DAyfE2lrrPhy+sGGfNiO3/ewf8K+UtFtC2vQwkZIcgj0wcV9O+K/GNn4dK2YAlv5VyIi+0KDxkmvA9Gtw3ja4cxLHyz7F6DJz/WuSdSPvJHRChP3ZtaXO2EYRB7cVG94kA5bH1NTsGYEAYFYt3ppunKsz7SP4Sa8jR/Ez6GMXHZF0eJ7SB8PcIMdec4/KhvHOkEbReIx/wB0/wCFZlv4ThjfemORg7uh9qjh8AWSXAm8yZ8Nu2cAetawpUGruTIqVMRf3IqxvQ3tvf5MTDd1xirCIRIozUFp4cK6sl8HZeCGjA4IxWy9uAcBcYrCrGKfuu5tBya95WLVrbu4xmm3dgV5bP1q1pqN0NWbmJmGOorNPQVrM5eZhFkFqhg1W3jlAeVc+5Aq7q2mSzW0iRfLIwwpx0rg9T8G3N3MrR3Sq4GGD5wT7VrRpqb952Jq1ZQXuq56hbajGyjbIpHs2atm4jkXGQcivO7Hw0LDSjBvke7frIHIx6Yq5pkOs2bbZrrz4
wf4xzVzpqO0rkxk6i95WOplhAbcOgrybxtbn/hKlt4wS02wAeu44r1yBjJCN2eR3NcFr4js/iRpl3JA80caKzIuCTyemT1rbAte2uzhx8G6LUT3Twvoy6LoltZqMbEGfyra24qK1uob23W5t3DRv7YK+x+lT17bd2eByuO4gFKKUUUiRMUYpcUoFADcUU7FFAFLFKKKKDcMUY/P/IpRRnH04oew+p4x8QraLUviBcFeBBbpFIffk/1rmNLgEfiSblSdnBrsPiLC1n4kv5Yxhpoo3z+BX+lcb4dtJU1BrqRskrtryp3cpXPpI008NCUUdQF3GrUFsrdRUCnDVeg7GvPcTsg00TJa46CrCW4JAx1qaFQ+KtIgHahoG7FZkES4NZ0lwjS7R61Jq10UAjj+85wKrw26QQ72fJHWs27sfL1NqyXaAQOtW5CAvNULHUoCgVQD75q81zHIAuACeKbs1ozGSd9g2RyJ0FZt1pqFi23rViQPb/OhymeRVyF0mX14pxFqtTESyVTxxU32AEZxWsYB2HFOCgCm13DnMXymiJU1y2r6adQ8UwEZwsKgkHp8xrubqMYyPWubjuRH4nWNuQwC9K1otqWhm1znYfDM3C6RqEFzKX8q9ZUJ7LtFdvWF4Usvselyy4x9omMv6Yrexjj0r34fCj5rEu9ZiUooxSiqOcTFGKWlAoENxRTsUUAUBRQKKDdBRzRRQM4n4j6Q9xZQatDEZFgBjuVX7wjPRh9DXndlbJHH5sTyJlvut3Fe97Q6MrKGRhgqehHcfSvMPFnhaXR5vtFlDJNpr5YeWCTEfQjuK4cTSe8T18BjUoOjNmSvXNXrdgDVCLLRqSCDjoRirKEjBrypxaep6lJ3RsW7g5q3vyMD0rIhlORV+OUY5P1qHI1aMHXhNGVuI1Zgh5Ark9XhvtWmjks76aEKuDHkjHvxXod1dW8aEMQfasWaVJWOxAFPFFOai7oU3zQ5TI0iw1S1hVZZHlB6OWyan1vRdd1CKNrPU3tlTkoGK5PrkV0Uce62VVwcelSvvjVeCT9atVLS5kkQ4Nx5X+Zl6FcXkGmC1vbn7Tcfd3ZzmuksiyKob0qraS2xkwyIr9iBWiUAwV6VEpJu49EuUuo2RQcZqFG4pxYGqUro52rFW4bDHHIx3rJ07RGvdZR3ZWd2+VVHIHc1qXRCxN6EV1XhzTUtLJbkr+/mH5CuzB0ud8zOTE4lUoPuzXjjWKJIkGFQYFSUClFeweDJtu7AUUo/Wg0EsAKUCilFBIYooooAzaKDRQbiiikpRQMKVWI6NikpRQBwXjqyEGo291GgRZ0w23+8Olc2jEKATyOtel+JtMbVNDmiiANxF+9hHqw7fzry5JMjJGPUeleRjYOM7nuZfVvDle5djfFQXd9JGu2PqTRHIBxTZI1kB4FeZLluerq0c7eaxDb3R86bNQt4kE2EtInk91UmtubRrfG4xIx9StOtYVtWDJtAHYCt4eytqKCne10jKtfEd3DMTKr7ccBoz1q2niedW3PC+PeMitlb23L4kZM+hq6uyZR5ewj6VpL2LWh0NWWrMKbXbKe28yOQxyZ6GtfRtXecLG7btw61FPoNpc5M0SMT3C06w0lNPnBjOU9PSueXKtjkcdTpo5BkLnk1ORkVUh4ANTNKFQkkj/CnBOTsjGrZFjTbdb3V4YZF3Io3sD6D/wCvXbDGOAAPasHw1ZslvJduMPMdq/7orer38PDkppHzmKnz1H5CilHWkApwFbnMA6mlxRilFAmJSijvS0EiUUtFAGZmim5o3UHQOzSg0zdRuoAfmjimbqUGgCRTtII7HpXl/jPT003XS0ORHcJ5mP7p7/rmvTAcnA6njFed/EW5gl1K1ijlVpoYiJFU8rkgrn8q5MXbksdeDclU0OUWfnk1ZSccc1gyXGxiQeM1NBeBuAa8OpTaPoYVNNToEkDcVJ9lWXqOtULeRSQd1bEDrt4OaxTaNk7lM6LBLJlkz6Yq/bWC24wuc
D1qYSYI9KtxsCMU27i2Ix0GaGFPcKBkGqs06xjJNJEtlhZduBnir+k2cmr3wiH+oT5pG/pXNm98x/kzxXovhSJYvDtu20Bn3M5xyTnj9K9XAUVJ80jysfWcY2j1NyNFjRUQYVRgD2p9NzSg17B4bQ4Uopo604UEjqUUgpRQJhiilFGKCRKKXFFAGIWo3UwmkNBsSbqN1R54ozQMk3Ub6jzSEnFDGSSTrBbTzsQBHGz5PAGBkfqBXzV4a1KfUX1K7u5WkuJ5A7FjnJOa9D+Lni2fToofD9nIY3uU8y4dTyE9K8fsLw6ZfsRxG3Brjrpzi0jpw7UaibZ19ymTVElozlTgirUd3HcRgg9RUMig9K8xdme21fVEsOpyRfezkVr2WvqpG8/jXP8Al5NKEAqJU4suMpI7Ya3AyjLDPrUqa5CozvriUjzjk1cit8kd6xdJLqae0fY6eXxCjfcBY1QNzNdSbmPHYVVgtTnAXFalvbhSPao0WwneW5Naw7RzXKXfj/XvB3jOZNPujJZhlZrObmN8jn6H3GOa7QYRcn0ryPx6M+JhIvRkWu/L5P21mefmUbUbo+pfDHiKy8VaBb6tYE+VNwyE/NG/dTWwDnpXiXwBu5Fl8QaeSfJCQ3Cr6NyD/SvbAa9w8GLuSCnDrUYp460CY8daXvTR1pw60C0Fpw6U2lHWmSxaKKKBHPUU49KbSNxDSA07BYgLkk9O34VxHjD4laX4WkNnCn2/Usf6lGwsf+8alySKSudrkAbmIVR95mIAArkNc+Jnh/R45Vtrlb+7TgRQ8qD/ALRFeQ6/4+1/XbeVLy98qB84gtxtUD0J71yKTbUKDgVhKcpF2jE2fFGu3XiLW5NUvPLEjgKAg4VR0FYk77lGTyAM8015M981CWPXjmiKI5i3ZanLZtjOY+6mugttVjnXIbP+yTyK46QA9KSOZo+QSD2NTUw8Z69Tpo42VPTdHoMUyPjB69qtKisMHrXE22puuMtn3rctdVJUE151XDzierRxlOodRb2gYVpR26riuftNWXHetSPUC33QfxrjlGXU7eaPQ1YwFqYSqnU1k/anIxx+FQzX8VuuZJAufXvSjBsHUSRpXd7tQ/Njjr6V5t4h/wCJjqkbKRtTqfWtTUNb88lIchfU96xg3zZzkmvRwtJwlzvc8nG4iM17NHY/DrxJa+EfEqz3eVs7xVt52U/c5yrH2ya+jUIZFYMrowBVl6MPWvkhdpiKOoYN1B7jvXoHgX4mS+F400vWXkudGUfupwN0ltz0OPvL+or1YSvozx3Gx72BinjrVayu7bULOG8sp0ntplDRyIeGFWBVk3JFp3emCng0CYtKKBThimSFFLxRQBz2D/niue1vxpoOggrc3olmxnyIfmb/AOtXkmv/ABH1zWlaHz/stv8A887cbcj3PeuLknLscsSSeprmlUk9tDr5Yx8z0TxF8W9SvYpINKhWygIwZCcyV5YZGeV5ZGZ5HJLMxySanuHIi2jqaqg5FSk+pMpNhM2QPSkK/LkU6RMqtPVQy47imRcqSZPPeoRJzhulWZI2UkgZ9R61XaPfyv5VcbC3EcY5XpTQu+nID0PFNZChzVgNO6M1LFdvEQVP4URzKTtccVKbeJxkNj2pNraSBXWsWX7TXmhHzAflWgvimRRhdo/CufFiTyrinLYPnk1hKjRlqdEMVWStc2ZfEVzL/wAtmP8AuDFVXvZJTliW985qslkVOSaspGidTU+zhHZClWqS3Y9GZ6mXg89ahMgUfLzUke5sZ60NGdycNmh5dqkDvximE7R71Eo3SDNVFdSWzsfCfjDVPCCpHp8ym1eQPJbTfMhHfHdfqK9i8M/E/RddlS0u86bfsM7JmHlv/uvnn8a+eGYjaKVJgMpIoZD1BGRj6GtFNp6g0j6/UjaGXBU9CDkU4V80eHfHXiDwrDt066+1WQbebO7+YfRG6qPpXtXhD4h6L4tRYYna01IL89nOQG99v94VsmpENNHX96cOt
IBjg0oqiLi0UtFAXPi09KjK5OcU8mk61yG7ZBL8zgAdKiYEMAetSnGTTYxvkyeg6U7BuKV3UoXB6U8qSaXGKL9BEbIHHpVSaAh9ycH+dXip65pD83BouIzvlY8ja1PCfwnpVowo/UZqI2jKcozD2PSncaK0lrxlelQmORDxmtEedGPnQEe1IZ4v4lI+opqTCyKazyoccmpVuZGPQ1P5lsTgMPyqUGDsRSbXYNSuJJXXvUsccjYz0qdWTtg/Snbj/Ch+pqPQH5gkW3rUoYDoKaFkf73FPAC0eo/QQKTyaRT+8pWbI4psYO+mnoCJ5DgioiTup0hJcClI7UAyaCYrx2qQS7JFfLKVO5XRirKfYjkH3qoBin5yvNO7T0Fc9Z8I/Fm50e3itNc82+tA6qlyDukhX1b+8Bxz1r2jTdRs9WsY7zT7mO5tpACkkbZFfIltKQCpP9a2vDniXVfCl9JdaNMsZlGJYJBmKTHcr1BHqPWtozuQ4rofVm8DrjP0orxaP4+ERqJvDMvm4G/bcLjPfHtRWlxcrPFGJpP4aR+oUUSHC4rlaNLkLnjGKljTaoPc1HGm993pVgjApAvMaaB0pOvagdaAHYzTWTj3p/f3oNK47EWw468ihSVNP70jdcUBYUtxyKglCrG77chQTUxHPWo7n/j2l/3D/Kmtwa0HQ6Xf3GlnUY7CMwbGkVTcxiWRFzudIid7oNrZZVIGx8n5WxFKl5Z6XZ6lNprx2V48iW0rNgSmPbv28ZIBYDPTOR1BxqWFrdWvhddUt9Qsbi6e0ngRJtTgVrG3JkWRFhdw7SOGkwApAV8jc7gx072ID4c6O32i0Zxqd47QrcxtKqvHAqlow28AmGTkjsP7y535ImPMyfWNP1Hw/vGoQWKyJKYXhh1O3mkRxnIZI2Zlxgg5HB46mkmt9WttbGjXGkvDqGxXaCWQIY1ZBJlyeEAQ7mLEbQDuxg40vG95FqNvqF9eXGjXF3c6mJtPm06KGNpLZhKZGkSMl1JJgIE5LjkZzvqS7aCz8dQCW9sWWbw+tkssN5FLGJjpv2cKzoxVf3o2ksQAPmJC80ezj2DnkZq2Gsz65a6Nb6X9ovrvBt1trhJUmHI3LIpKFQVYFgcLtbJG04q6Yl7q/mta20CxxYDy3N5HbxqTnau+Qqu44Yhc5IViBgHG5pjQW/jXwFaS3tiraV5X22X7ZEYYsXk05/e7tjfu3U8MeTt+9xWPplobnw9rOhJc2KX/APaFtMomvYYo2SJLhHKyswjbBlTGGOQcjIBIPZx7BzyZGkeqNBqc39kzrHpe0XzP8v2clxGFbIGGLHG3rweMA4jie+GjvrA05zp6XC2puC2FMrKWCDjk7VJOOnGcZGdye5ttW1X4j6haXlp9mvEle2M1wkLTBr2KYbEchmOyNzgDPAHUgHHsogfhzrDfaLRXOp2brC1zGsrKkc6sVjLbyAZo+QO5/utg9nEOZkyQanNosusLYRiyiTzGLXUauU3iPesZO9k3sF3AEZyM5BwfZ9X+26TZtpMiXOrJG9jG8gUzLI5RDz0DMDgnHGD0INa2u3lhfeHpZ3GlGAaVYxWbxPGbt7yNII5N4yZVUIk64IWI7VYAsys1fz00u9+Ht82oWMa2kUbSyLKtz9nIvJZsyRxtu4SRG28E8jgg4fJEOZlWaz1KK7trQQWM81xuKC11S3nChRlmdo2IjUDJLMQAATnAOJItM1iS9mtVs7QGK3+1NM2owLAYt4j3rMW8thvYLwx+bI6ggaUT7dR0tbp/D66xKl/DdfZrm1t4GtHtwqL5kP7lJTuuApbJDFN4K7ap3c+hah4w8J6VKkEWl2Pk2V7i6LRBTcyO/wC+yN2FlwzrtUsGKYTaaOSIczMjUprvQtWudOvrWNLu3fZKiXCSBW7j this.src='../images/defaultHeader.jpg' /images/small_white.png
2,492.75
9,907
0.978237
210
9,971
46.442857
0.990476
0
0
0
0
0
0
0
0
0
0
0.146368
0.000301
9,971
4
9,908
2,492.75
0.832063
0
0
0
0
0
0.002708
0.002708
0
1
0
0
0
0
null
null
0
0
null
null
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
1
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
f1d4a924e8a5953f6e4d0b1a281f439de7754890
106
py
Python
netpbmfile/__init__.py
cgohlke/netpbmfile
4c84636d5535c44aa2e91fbc315b9a5282ecdf20
[ "BSD-3-Clause" ]
4
2020-02-23T20:18:01.000Z
2022-03-05T09:47:55.000Z
netpbmfile/__init__.py
cgohlke/netpbmfile
4c84636d5535c44aa2e91fbc315b9a5282ecdf20
[ "BSD-3-Clause" ]
null
null
null
netpbmfile/__init__.py
cgohlke/netpbmfile
4c84636d5535c44aa2e91fbc315b9a5282ecdf20
[ "BSD-3-Clause" ]
null
null
null
# netpbmfile/__init__.py from .netpbmfile import __doc__, __all__, __version__ from .netpbmfile import *
21.2
53
0.801887
12
106
5.75
0.666667
0.405797
0.57971
0
0
0
0
0
0
0
0
0
0.122642
106
4
54
26.5
0.741935
0.207547
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
f1e01161020bd19cd408163fd68e8137d22d090d
121
py
Python
ezotv/cache_tools/__init__.py
marcsello/ezotv-frontend
405c440a567e8a0f1577f10d45385f3171398afe
[ "CC0-1.0" ]
null
null
null
ezotv/cache_tools/__init__.py
marcsello/ezotv-frontend
405c440a567e8a0f1577f10d45385f3171398afe
[ "CC0-1.0" ]
7
2020-01-23T00:50:39.000Z
2020-04-18T20:34:40.000Z
ezotv/cache_tools/__init__.py
marcsello/ezotv-frontend
405c440a567e8a0f1577f10d45385f3171398afe
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/env python3 from .redis_client import redis_client from .cached_base_http_session import CachedBaseHttpSession
40.333333
59
0.867769
17
121
5.882353
0.764706
0.22
0
0
0
0
0
0
0
0
0
0.008929
0.07438
121
3
59
40.333333
0.883929
0.173554
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6